[llvm] r353956 - [Tests] Rename some test files for consistency

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 13 09:23:11 PST 2019


Author: reames
Date: Wed Feb 13 09:23:11 2019
New Revision: 353956

URL: http://llvm.org/viewvc/llvm-project?rev=353956&view=rev
Log:
[Tests] Rename some test files for consistency

Most are named "atomic-something", so rename the few that were "atomic_something". I keep typing the wrong name due to the inconsistency.  :)


Added:
    llvm/trunk/test/CodeGen/X86/atomic-add.ll
      - copied unchanged from r353955, llvm/trunk/test/CodeGen/X86/atomic_add.ll
    llvm/trunk/test/CodeGen/X86/atomic-idempotent.ll
      - copied unchanged from r353955, llvm/trunk/test/CodeGen/X86/atomic_idempotent.ll
    llvm/trunk/test/CodeGen/X86/atomic-mi.ll
      - copied unchanged from r353955, llvm/trunk/test/CodeGen/X86/atomic_mi.ll
    llvm/trunk/test/CodeGen/X86/atomic-op.ll
      - copied unchanged from r353955, llvm/trunk/test/CodeGen/X86/atomic_op.ll
Removed:
    llvm/trunk/test/CodeGen/X86/atomic_add.ll
    llvm/trunk/test/CodeGen/X86/atomic_idempotent.ll
    llvm/trunk/test/CodeGen/X86/atomic_mi.ll
    llvm/trunk/test/CodeGen/X86/atomic_op.ll

Removed: llvm/trunk/test/CodeGen/X86/atomic_add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_add.ll?rev=353955&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_add.ll (removed)
@@ -1,219 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-- -mattr=slow-incdec -verify-machineinstrs | FileCheck %s --check-prefix SLOW_INC
-
-; rdar://7103704
-
-define void @sub1(i32* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: sub1:
-; CHECK: subl
-  %0 = atomicrmw sub i32* %p, i32 %v monotonic
-  ret void
-}
-
-define void @inc4(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: inc4:
-; CHECK: incq
-; SLOW_INC-LABEL: inc4:
-; SLOW_INC-NOT: incq
-  %0 = atomicrmw add i64* %p, i64 1 monotonic
-  ret void
-}
-
-define void @add8(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: add8:
-; CHECK: addq $2
-  %0 = atomicrmw add i64* %p, i64 2 monotonic
-  ret void
-}
-
-define void @add4(i64* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: add4:
-; CHECK: addq
-  %0 = sext i32 %v to i64		; <i64> [#uses=1]
-  %1 = atomicrmw add i64* %p, i64 %0 monotonic
-  ret void
-}
-
-define void @inc3(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: inc3:
-; CHECK: incb
-; SLOW_INC-LABEL: inc3:
-; SLOW_INC-NOT: incb
-  %0 = atomicrmw add i8* %p, i8 1 monotonic
-  ret void
-}
-
-define void @add7(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: add7:
-; CHECK: addb $2
-  %0 = atomicrmw add i8* %p, i8 2 monotonic
-  ret void
-}
-
-define void @add3(i8* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: add3:
-; CHECK: addb
-  %0 = trunc i32 %v to i8		; <i8> [#uses=1]
-  %1 = atomicrmw add i8* %p, i8 %0 monotonic
-  ret void
-}
-
-define void @inc2(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: inc2:
-; CHECK: incw
-; SLOW_INC-LABEL: inc2:
-; SLOW_INC-NOT: incw
-  %0 = atomicrmw add i16* %p, i16 1 monotonic
-  ret void
-}
-
-define void @add6(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: add6:
-; CHECK: addw $2
-  %0 = atomicrmw add i16* %p, i16 2 monotonic
-  ret void
-}
-
-define void @add2(i16* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: add2:
-; CHECK: addw
-	%0 = trunc i32 %v to i16		; <i16> [#uses=1]
-  %1 = atomicrmw add i16* %p, i16 %0 monotonic
-  ret void
-}
-
-define void @inc1(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: inc1:
-; CHECK: incl
-; SLOW_INC-LABEL: inc1:
-; SLOW_INC-NOT: incl
-  %0 = atomicrmw add i32* %p, i32 1 monotonic
-  ret void
-}
-
-define void @add5(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: add5:
-; CHECK: addl $2
-  %0 = atomicrmw add i32* %p, i32 2 monotonic
-  ret void
-}
-
-define void @add1(i32* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: add1:
-; CHECK: addl
-  %0 = atomicrmw add i32* %p, i32 %v monotonic
-  ret void
-}
-
-define void @dec4(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: dec4:
-; CHECK: decq
-; SLOW_INC-LABEL: dec4:
-; SLOW_INC-NOT: decq
-  %0 = atomicrmw sub i64* %p, i64 1 monotonic
-  ret void
-}
-
-define void @sub8(i64* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: sub8:
-; CHECK: subq $2
-  %0 = atomicrmw sub i64* %p, i64 2 monotonic
-  ret void
-}
-
-define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: sub4:
-; CHECK: subq
-	%0 = sext i32 %v to i64		; <i64> [#uses=1]
-  %1 = atomicrmw sub i64* %p, i64 %0 monotonic
-  ret void
-}
-
-define void @dec3(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: dec3:
-; CHECK: decb
-; SLOW_INC-LABEL: dec3:
-; SLOW_INC-NOT: decb
-  %0 = atomicrmw sub i8* %p, i8 1 monotonic
-  ret void
-}
-
-define void @sub7(i8* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: sub7:
-; CHECK: subb $2
-  %0 = atomicrmw sub i8* %p, i8 2 monotonic
-  ret void
-}
-
-define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: sub3:
-; CHECK: subb
-	%0 = trunc i32 %v to i8		; <i8> [#uses=1]
-  %1 = atomicrmw sub i8* %p, i8 %0 monotonic
-  ret void
-}
-
-define void @dec2(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: dec2:
-; CHECK: decw
-; SLOW_INC-LABEL: dec2:
-; SLOW_INC-NOT: decw
-  %0 = atomicrmw sub i16* %p, i16 1 monotonic
-  ret void
-}
-
-define void @sub6(i16* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: sub6:
-; CHECK: subw $2
-  %0 = atomicrmw sub i16* %p, i16 2 monotonic
-  ret void
-}
-
-define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp {
-entry:
-; CHECK-LABEL: sub2:
-; CHECK-NOT: negl
-; CHECK: subw
-	%0 = trunc i32 %v to i16		; <i16> [#uses=1]
-  %1 = atomicrmw sub i16* %p, i16 %0 monotonic
-  ret void
-}
-
-define void @dec1(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: dec1:
-; CHECK: decl
-; SLOW_INC-LABEL: dec1:
-; SLOW_INC-NOT: decl
-  %0 = atomicrmw sub i32* %p, i32 1 monotonic
-  ret void
-}
-
-define void @sub5(i32* nocapture %p) nounwind ssp {
-entry:
-; CHECK-LABEL: sub5:
-; CHECK: subl $2
-  %0 = atomicrmw sub i32* %p, i32 2 monotonic
-  ret void
-}
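
For reference, the atomic_add.ll checks above correspond to relaxed atomic read-modify-write operations whose result is discarded. A minimal C++ sketch of that source-level pattern (illustrative only; the snippet is not part of this commit and the function names are hypothetical):

    #include <atomic>
    #include <cstdint>

    // Relaxed ("monotonic") RMW operations with an unused result, mirroring the
    // atomicrmw tests above; on x86-64 these can lower to a single lock-prefixed
    // memory-destination instruction such as incq, addq, or subq.
    void inc4(std::atomic<int64_t> &p) {
      p.fetch_add(1, std::memory_order_relaxed);   // atomicrmw add i64 1 monotonic
    }
    void add8(std::atomic<int64_t> &p) {
      p.fetch_add(2, std::memory_order_relaxed);   // atomicrmw add i64 2 monotonic
    }
    void sub1(std::atomic<int32_t> &p, int32_t v) {
      p.fetch_sub(v, std::memory_order_relaxed);   // atomicrmw sub i32 %v monotonic
    }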

Removed: llvm/trunk/test/CodeGen/X86/atomic_idempotent.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_idempotent.ll?rev=353955&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_idempotent.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_idempotent.ll (removed)
@@ -1,166 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X64
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=X32
-
-; On x86, an atomic rmw operation that does not modify the value in memory
-; (such as atomic add 0) can be replaced by an mfence followed by a mov.
-; This is explained (with the motivation for such an optimization) in
-; http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
-
-define i8 @add8(i8* %p) {
-; X64-LABEL: add8:
-; X64:       # %bb.0:
-; X64-NEXT:    mfence
-; X64-NEXT:    movb (%rdi), %al
-; X64-NEXT:    retq
-;
-; X32-LABEL: add8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    mfence
-; X32-NEXT:    movb (%eax), %al
-; X32-NEXT:    retl
-  %1 = atomicrmw add i8* %p, i8 0 monotonic
-  ret i8 %1
-}
-
-define i16 @or16(i16* %p) {
-; X64-LABEL: or16:
-; X64:       # %bb.0:
-; X64-NEXT:    mfence
-; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    retq
-;
-; X32-LABEL: or16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    mfence
-; X32-NEXT:    movzwl (%eax), %eax
-; X32-NEXT:    retl
-  %1 = atomicrmw or i16* %p, i16 0 acquire
-  ret i16 %1
-}
-
-define i32 @xor32(i32* %p) {
-; X64-LABEL: xor32:
-; X64:       # %bb.0:
-; X64-NEXT:    mfence
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    mfence
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    retl
-  %1 = atomicrmw xor i32* %p, i32 0 release
-  ret i32 %1
-}
-
-define i64 @sub64(i64* %p) {
-; X64-LABEL: sub64:
-; X64:       # %bb.0:
-; X64-NEXT:    mfence
-; X64-NEXT:    movq (%rdi), %rax
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB3_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB3_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-  %1 = atomicrmw sub i64* %p, i64 0 seq_cst
-  ret i64 %1
-}
-
-define i128 @or128(i128* %p) {
-; X64-LABEL: or128:
-; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    .cfi_def_cfa_offset 16
-; X64-NEXT:    xorl %esi, %esi
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    callq __sync_fetch_and_or_16
-; X64-NEXT:    popq %rcx
-; X64-NEXT:    .cfi_def_cfa_offset 8
-; X64-NEXT:    retq
-;
-; X32-LABEL: or128:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $16, %esp
-; X32-NEXT:    .cfi_offset %esi, -16
-; X32-NEXT:    .cfi_offset %edi, -12
-; X32-NEXT:    movl 8(%ebp), %esi
-; X32-NEXT:    movl %esp, %eax
-; X32-NEXT:    pushl $0
-; X32-NEXT:    pushl $0
-; X32-NEXT:    pushl $0
-; X32-NEXT:    pushl $0
-; X32-NEXT:    pushl 12(%ebp)
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    calll __sync_fetch_and_or_16
-; X32-NEXT:    addl $20, %esp
-; X32-NEXT:    movl (%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    movl %edi, 12(%esi)
-; X32-NEXT:    movl %edx, 8(%esi)
-; X32-NEXT:    movl %ecx, 4(%esi)
-; X32-NEXT:    movl %eax, (%esi)
-; X32-NEXT:    movl %esi, %eax
-; X32-NEXT:    leal -8(%ebp), %esp
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl $4
-  %1 = atomicrmw or i128* %p, i128 0 monotonic
-  ret i128 %1
-}
-
-; For 'and', the idempotent value is (-1)
-define i32 @and32 (i32* %p) {
-; X64-LABEL: and32:
-; X64:       # %bb.0:
-; X64-NEXT:    mfence
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    retq
-;
-; X32-LABEL: and32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    mfence
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    retl
-  %1 = atomicrmw and i32* %p, i32 -1 acq_rel
-  ret i32 %1
-}
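
The atomic_idempotent.ll checks above exercise RMW operations that leave memory unchanged (add 0, or 0, and -1), which the backend may lower to an mfence followed by a plain load, as described in the HPL-2012-68 report cited in the test. A C++ sketch of those source-level operations (illustrative only; the snippet is not part of this commit, and the function names merely echo the test):

    #include <atomic>
    #include <cstdint>

    // Idempotent RMW operations: the stored value equals the loaded value, so
    // aside from memory ordering the operation just returns the current
    // contents of memory.
    int8_t  add8(std::atomic<int8_t>  &p) { return p.fetch_add(0, std::memory_order_relaxed); }
    int16_t or16(std::atomic<int16_t> &p) { return p.fetch_or(0, std::memory_order_acquire); }
    int32_t and32(std::atomic<int32_t> &p) { return p.fetch_and(-1, std::memory_order_acq_rel); }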

Removed: llvm/trunk/test/CodeGen/X86/atomic_mi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_mi.ll?rev=353955&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_mi.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_mi.ll (removed)
@@ -1,2285 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix X64 --check-prefix FAST_INC
-; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=slow-incdec -verify-machineinstrs | FileCheck %s --check-prefix X64 --check-prefix SLOW_INC
-
-; This file checks that atomic (non-seq_cst) stores of immediate values are
-; done in one mov instruction and not 2. More precisely, it makes sure that the
-; immediate is not first copied uselessly into a register.
-
-; Similarly, it checks that a binary operation of an immediate with an atomic
-; variable that is stored back in that variable is done as a single instruction.
-; For example: x.store(42 + x.load(memory_order_acquire), memory_order_release)
-; should be just an add instruction, instead of loading x into a register, doing
-; an add and storing the result back.
-; The binary operations supported are currently add, and, or, xor.
-; sub is not supported because it is translated into an addition of the
-; negated immediate.
-;
-; We also check the same patterns:
-; - For inc/dec.
-; - For register instead of immediate operands.
-; - For floating point operations.
-
-; seq_cst stores are left as (lock) xchgl, but we try to check every other
-; attribute at least once.
-
-; Please note that these operations do not require the lock prefix: only
-; sequentially consistent stores require this kind of protection on X86.
-; And even for seq_cst operations, llvm uses the xchg instruction, which has
-; an implicit lock prefix, so making it explicit is not required.
-
-define void @store_atomic_imm_8(i8* %p) {
-; X64-LABEL: store_atomic_imm_8:
-; X64:       # %bb.0:
-; X64-NEXT:    movb $42, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: store_atomic_imm_8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movb $42, (%eax)
-; X32-NEXT:    retl
-  store atomic i8 42, i8* %p release, align 1
-  ret void
-}
-
-define void @store_atomic_imm_16(i16* %p) {
-; X64-LABEL: store_atomic_imm_16:
-; X64:       # %bb.0:
-; X64-NEXT:    movw $42, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: store_atomic_imm_16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movw $42, (%eax)
-; X32-NEXT:    retl
-  store atomic i16 42, i16* %p monotonic, align 2
-  ret void
-}
-
-define void @store_atomic_imm_32(i32* %p) {
-; X64-LABEL: store_atomic_imm_32:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $42, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: store_atomic_imm_32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl $42, (%eax)
-; X32-NEXT:    retl
-;   On 32 bits, there is an extra movl for each of those functions
-;   (probably for alignment reasons).
-  store atomic i32 42, i32* %p release, align 4
-  ret void
-}
-
-define void @store_atomic_imm_64(i64* %p) {
-; X64-LABEL: store_atomic_imm_64:
-; X64:       # %bb.0:
-; X64-NEXT:    movq $42, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: store_atomic_imm_64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    movl $42, %ebx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB3_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB3_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   These are implemented with a CAS loop on 32 bit architectures, and thus
-;   cannot be optimized in the same way as the others.
-  store atomic i64 42, i64* %p release, align 8
-  ret void
-}
-
-; If an immediate is too big to fit in 32 bits, it cannot be stored in one mov;
-; even on X64, one must use movabsq, which can only target a register.
-define void @store_atomic_imm_64_big(i64* %p) {
-; X64-LABEL: store_atomic_imm_64_big:
-; X64:       # %bb.0:
-; X64-NEXT:    movabsq $100000000000, %rax # imm = 0x174876E800
-; X64-NEXT:    movq %rax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: store_atomic_imm_64_big:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    movl $23, %ecx
-; X32-NEXT:    movl $1215752192, %ebx # imm = 0x4876E800
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB4_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB4_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-  store atomic i64 100000000000, i64* %p monotonic, align 8
-  ret void
-}
-
-; It would be incorrect to replace a lock xchgl by a movl
-define void @store_atomic_imm_32_seq_cst(i32* %p) {
-; X64-LABEL: store_atomic_imm_32_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl $42, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: store_atomic_imm_32_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl $42, %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  store atomic i32 42, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- ADD -----
-
-define void @add_8i(i8* %p) {
-; X64-LABEL: add_8i:
-; X64:       # %bb.0:
-; X64-NEXT:    addb $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_8i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    addb $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = add i8 %1, 2
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @add_8r(i8* %p, i8 %v) {
-; X64-LABEL: add_8r:
-; X64:       # %bb.0:
-; X64-NEXT:    addb %sil, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_8r:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    addb %al, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = add i8 %1, %v
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @add_16i(i16* %p) {
-; X64-LABEL: add_16i:
-; X64:       # %bb.0:
-; X64-NEXT:    addw $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_16i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    addw $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = add i16 %1, 2
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @add_16r(i16* %p, i16 %v) {
-; X64-LABEL: add_16r:
-; X64:       # %bb.0:
-; X64-NEXT:    addw %si, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_16r:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    addw %ax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = add i16 %1, %v
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @add_32i(i32* %p) {
-; X64-LABEL: add_32i:
-; X64:       # %bb.0:
-; X64-NEXT:    addl $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_32i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    addl $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = add i32 %1, 2
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-define void @add_32r(i32* %p, i32 %v) {
-; X64-LABEL: add_32r:
-; X64:       # %bb.0:
-; X64-NEXT:    addl %esi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_32r:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    addl %eax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = add i32 %1, %v
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-; The following is a corner case where the load is added to itself. The pattern
-; matching should not fold this. We only test with 32-bit add, but the same
-; applies to other sizes and operations.
-define void @add_32r_self(i32* %p) {
-; X64-LABEL: add_32r_self:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    addl %eax, %eax
-; X64-NEXT:    movl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_32r_self:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    addl %ecx, %ecx
-; X32-NEXT:    movl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = add i32 %1, %1
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-; The following is a corner case where the load's result is returned. The
-; optimizer isn't allowed to duplicate the load because it's atomic.
-define i32 @add_32r_ret_load(i32* %p, i32 %v) {
-; X64-LABEL: add_32r_ret_load:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    addl %eax, %esi
-; X64-NEXT:    movl %esi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_32r_ret_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%ecx), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    addl %eax, %edx
-; X32-NEXT:    movl %edx, (%ecx)
-; X32-NEXT:    retl
-; More code here, we just don't want it to load from P.
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = add i32 %1, %v
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret i32 %1
-}
-
-define void @add_64i(i64* %p) {
-; X64-LABEL: add_64i:
-; X64:       # %bb.0:
-; X64-NEXT:    addq $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_64i:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    addl $2, %ebx
-; X32-NEXT:    adcl $0, %ecx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB14_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB14_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'addq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = add i64 %1, 2
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @add_64r(i64* %p, i64 %v) {
-; X64-LABEL: add_64r:
-; X64:       # %bb.0:
-; X64-NEXT:    addq %rsi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_64r:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    addl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB15_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB15_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'addq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = add i64 %1, %v
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @add_32i_seq_cst(i32* %p) {
-; X64-LABEL: add_32i_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    addl $2, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_32i_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    addl $2, %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = add i32 %1, 2
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-define void @add_32r_seq_cst(i32* %p, i32 %v) {
-; X64-LABEL: add_32r_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    addl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: add_32r_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = add i32 %1, %v
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- SUB -----
-
-define void @sub_8r(i8* %p, i8 %v) {
-; X64-LABEL: sub_8r:
-; X64:       # %bb.0:
-; X64-NEXT:    subb %sil, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_8r:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    subb %al, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = sub i8 %1, %v
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @sub_16r(i16* %p, i16 %v) {
-; X64-LABEL: sub_16r:
-; X64:       # %bb.0:
-; X64-NEXT:    subw %si, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_16r:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    subw %ax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = sub i16 %1, %v
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @sub_32r(i32* %p, i32 %v) {
-; X64-LABEL: sub_32r:
-; X64:       # %bb.0:
-; X64-NEXT:    subl %esi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_32r:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    subl %eax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = sub i32 %1, %v
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-; The following is a corner case where the load is subtracted from itself. The pattern
-; matching should not fold this. We only test with 32-bit sub, but the same
-; applies to other sizes and operations.
-define void @sub_32r_self(i32* %p) {
-; X64-LABEL: sub_32r_self:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    movl $0, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_32r_self:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    movl $0, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = sub i32 %1, %1
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-; The following is a corner case where the load's result is returned. The
-; optimizer isn't allowed to duplicate the load because it's atomic.
-define i32 @sub_32r_ret_load(i32* %p, i32 %v) {
-; X64-LABEL: sub_32r_ret_load:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    movl %eax, %ecx
-; X64-NEXT:    subl %esi, %ecx
-; X64-NEXT:    movl %ecx, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_32r_ret_load:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%ecx), %eax
-; X32-NEXT:    movl %eax, %edx
-; X32-NEXT:    subl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl %edx, (%ecx)
-; X32-NEXT:    retl
-; More code here, we just don't want it to load from P.
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = sub i32 %1, %v
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret i32 %1
-}
-
-define void @sub_64r(i64* %p, i64 %v) {
-; X64-LABEL: sub_64r:
-; X64:       # %bb.0:
-; X64-NEXT:    subq %rsi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_64r:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    subl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB23_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB23_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'subq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = sub i64 %1, %v
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @sub_32r_seq_cst(i32* %p, i32 %v) {
-; X64-LABEL: sub_32r_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    subl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: sub_32r_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = sub i32 %1, %v
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- AND -----
-
-define void @and_8i(i8* %p) {
-; X64-LABEL: and_8i:
-; X64:       # %bb.0:
-; X64-NEXT:    andb $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_8i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    andb $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p monotonic, align 1
-  %2 = and i8 %1, 2
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @and_8r(i8* %p, i8 %v) {
-; X64-LABEL: and_8r:
-; X64:       # %bb.0:
-; X64-NEXT:    andb %sil, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_8r:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    andb %al, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p monotonic, align 1
-  %2 = and i8 %1, %v
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @and_16i(i16* %p) {
-; X64-LABEL: and_16i:
-; X64:       # %bb.0:
-; X64-NEXT:    andw $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_16i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    andw $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = and i16 %1, 2
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @and_16r(i16* %p, i16 %v) {
-; X64-LABEL: and_16r:
-; X64:       # %bb.0:
-; X64-NEXT:    andw %si, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_16r:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    andw %ax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = and i16 %1, %v
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @and_32i(i32* %p) {
-; X64-LABEL: and_32i:
-; X64:       # %bb.0:
-; X64-NEXT:    andl $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_32i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    andl $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = and i32 %1, 2
-  store atomic i32 %2, i32* %p release, align 4
-  ret void
-}
-
-define void @and_32r(i32* %p, i32 %v) {
-; X64-LABEL: and_32r:
-; X64:       # %bb.0:
-; X64-NEXT:    andl %esi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_32r:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    andl %eax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = and i32 %1, %v
-  store atomic i32 %2, i32* %p release, align 4
-  ret void
-}
-
-define void @and_64i(i64* %p) {
-; X64-LABEL: and_64i:
-; X64:       # %bb.0:
-; X64-NEXT:    andq $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_64i:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    andl $2, %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB31_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB31_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'andq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = and i64 %1, 2
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @and_64r(i64* %p, i64 %v) {
-; X64-LABEL: and_64r:
-; X64:       # %bb.0:
-; X64-NEXT:    andq %rsi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_64r:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    andl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    andl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB32_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB32_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'andq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = and i64 %1, %v
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @and_32i_seq_cst(i32* %p) {
-; X64-LABEL: and_32i_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    andl $2, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_32i_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    andl $2, %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = and i32 %1, 2
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-define void @and_32r_seq_cst(i32* %p, i32 %v) {
-; X64-LABEL: and_32r_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    andl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: and_32r_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    andl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = and i32 %1, %v
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- OR -----
-
-define void @or_8i(i8* %p) {
-; X64-LABEL: or_8i:
-; X64:       # %bb.0:
-; X64-NEXT:    orb $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_8i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    orb $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p acquire, align 1
-  %2 = or i8 %1, 2
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @or_8r(i8* %p, i8 %v) {
-; X64-LABEL: or_8r:
-; X64:       # %bb.0:
-; X64-NEXT:    orb %sil, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_8r:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    orb %al, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p acquire, align 1
-  %2 = or i8 %1, %v
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @or_16i(i16* %p) {
-; X64-LABEL: or_16i:
-; X64:       # %bb.0:
-; X64-NEXT:    orw $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_16i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    orw $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = or i16 %1, 2
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @or_16r(i16* %p, i16 %v) {
-; X64-LABEL: or_16r:
-; X64:       # %bb.0:
-; X64-NEXT:    orw %si, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_16r:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    orw %ax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = or i16 %1, %v
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @or_32i(i32* %p) {
-; X64-LABEL: or_32i:
-; X64:       # %bb.0:
-; X64-NEXT:    orl $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_32i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    orl $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = or i32 %1, 2
-  store atomic i32 %2, i32* %p release, align 4
-  ret void
-}
-
-define void @or_32r(i32* %p, i32 %v) {
-; X64-LABEL: or_32r:
-; X64:       # %bb.0:
-; X64-NEXT:    orl %esi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_32r:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    orl %eax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = or i32 %1, %v
-  store atomic i32 %2, i32* %p release, align 4
-  ret void
-}
-
-define void @or_64i(i64* %p) {
-; X64-LABEL: or_64i:
-; X64:       # %bb.0:
-; X64-NEXT:    orq $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_64i:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    orl $2, %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB41_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB41_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'orq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = or i64 %1, 2
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @or_64r(i64* %p, i64 %v) {
-; X64-LABEL: or_64r:
-; X64:       # %bb.0:
-; X64-NEXT:    orq %rsi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_64r:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    orl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB42_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB42_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'orq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = or i64 %1, %v
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @or_32i_seq_cst(i32* %p) {
-; X64-LABEL: or_32i_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    orl $2, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_32i_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    orl $2, %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = or i32 %1, 2
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-define void @or_32r_seq_cst(i32* %p, i32 %v) {
-; X64-LABEL: or_32r_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    orl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: or_32r_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = or i32 %1, %v
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- XOR -----
-
-define void @xor_8i(i8* %p) {
-; X64-LABEL: xor_8i:
-; X64:       # %bb.0:
-; X64-NEXT:    xorb $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_8i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorb $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p acquire, align 1
-  %2 = xor i8 %1, 2
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @xor_8r(i8* %p, i8 %v) {
-; X64-LABEL: xor_8r:
-; X64:       # %bb.0:
-; X64-NEXT:    xorb %sil, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_8r:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorb %al, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p acquire, align 1
-  %2 = xor i8 %1, %v
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @xor_16i(i16* %p) {
-; X64-LABEL: xor_16i:
-; X64:       # %bb.0:
-; X64-NEXT:    xorw $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_16i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorw $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = xor i16 %1, 2
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @xor_16r(i16* %p, i16 %v) {
-; X64-LABEL: xor_16r:
-; X64:       # %bb.0:
-; X64-NEXT:    xorw %si, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_16r:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorw %ax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = xor i16 %1, %v
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @xor_32i(i32* %p) {
-; X64-LABEL: xor_32i:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_32i:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    xorl $2, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = xor i32 %1, 2
-  store atomic i32 %2, i32* %p release, align 4
-  ret void
-}
-
-define void @xor_32r(i32* %p, i32 %v) {
-; X64-LABEL: xor_32r:
-; X64:       # %bb.0:
-; X64-NEXT:    xorl %esi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_32r:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorl %eax, (%ecx)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = xor i32 %1, %v
-  store atomic i32 %2, i32* %p release, align 4
-  ret void
-}
-
-define void @xor_64i(i64* %p) {
-; X64-LABEL: xor_64i:
-; X64:       # %bb.0:
-; X64-NEXT:    xorq $2, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_64i:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    xorl $2, %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB51_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB51_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'xorq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = xor i64 %1, 2
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @xor_64r(i64* %p, i64 %v) {
-; X64-LABEL: xor_64r:
-; X64:       # %bb.0:
-; X64-NEXT:    xorq %rsi, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_64r:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xorl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB52_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB52_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'xorq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = xor i64 %1, %v
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @xor_32i_seq_cst(i32* %p) {
-; X64-LABEL: xor_32i_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    xorl $2, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_32i_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    xorl $2, %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = xor i32 %1, 2
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-define void @xor_32r_seq_cst(i32* %p, i32 %v) {
-; X64-LABEL: xor_32r_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    xorl %esi, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: xor_32r_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = xor i32 %1, %v
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- INC -----
-
-define void @inc_8(i8* %p) {
-; FAST_INC-LABEL: inc_8:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    incb (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: inc_8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    incb (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: inc_8:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addb $1, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = add i8 %1, 1
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @inc_16(i16* %p) {
-; FAST_INC-LABEL: inc_16:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    incw (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: inc_16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    incw (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: inc_16:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addw $1, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = add i16 %1, 1
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @inc_32(i32* %p) {
-; FAST_INC-LABEL: inc_32:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    incl (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: inc_32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    incl (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: inc_32:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addl $1, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = add i32 %1, 1
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-define void @inc_64(i64* %p) {
-; FAST_INC-LABEL: inc_64:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    incq (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: inc_64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    addl $1, %ebx
-; X32-NEXT:    adcl $0, %ecx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB58_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB58_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: inc_64:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addq $1, (%rdi)
-; SLOW_INC-NEXT:    retq
-;   We do not check X86-32 as it cannot do 'incq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = add i64 %1, 1
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @inc_32_seq_cst(i32* %p) {
-; FAST_INC-LABEL: inc_32_seq_cst:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    movl (%rdi), %eax
-; FAST_INC-NEXT:    incl %eax
-; FAST_INC-NEXT:    xchgl %eax, (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: inc_32_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    incl %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: inc_32_seq_cst:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    movl (%rdi), %eax
-; SLOW_INC-NEXT:    addl $1, %eax
-; SLOW_INC-NEXT:    xchgl %eax, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = add i32 %1, 1
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- DEC -----
-
-define void @dec_8(i8* %p) {
-; FAST_INC-LABEL: dec_8:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    decb (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: dec_8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    decb (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: dec_8:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addb $-1, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = sub i8 %1, 1
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @dec_16(i16* %p) {
-; FAST_INC-LABEL: dec_16:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    decw (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: dec_16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    decw (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: dec_16:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addw $-1, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = sub i16 %1, 1
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @dec_32(i32* %p) {
-; FAST_INC-LABEL: dec_32:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    decl (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: dec_32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    decl (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: dec_32:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addl $-1, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = sub i32 %1, 1
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-define void @dec_64(i64* %p) {
-; FAST_INC-LABEL: dec_64:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    decq (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: dec_64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    addl $-1, %ebx
-; X32-NEXT:    adcl $-1, %ecx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB63_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB63_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: dec_64:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    addq $-1, (%rdi)
-; SLOW_INC-NEXT:    retq
-;   We do not check X86-32 as it cannot do 'decq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = sub i64 %1, 1
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @dec_32_seq_cst(i32* %p) {
-; FAST_INC-LABEL: dec_32_seq_cst:
-; FAST_INC:       # %bb.0:
-; FAST_INC-NEXT:    movl (%rdi), %eax
-; FAST_INC-NEXT:    decl %eax
-; FAST_INC-NEXT:    xchgl %eax, (%rdi)
-; FAST_INC-NEXT:    retq
-;
-; X32-LABEL: dec_32_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    decl %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-;
-; SLOW_INC-LABEL: dec_32_seq_cst:
-; SLOW_INC:       # %bb.0:
-; SLOW_INC-NEXT:    movl (%rdi), %eax
-; SLOW_INC-NEXT:    addl $-1, %eax
-; SLOW_INC-NEXT:    xchgl %eax, (%rdi)
-; SLOW_INC-NEXT:    retq
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = sub i32 %1, 1
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- NOT -----
-
-define void @not_8(i8* %p) {
-; X64-LABEL: not_8:
-; X64:       # %bb.0:
-; X64-NEXT:    notb (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: not_8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    notb (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = xor i8 %1, -1
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @not_16(i16* %p) {
-; X64-LABEL: not_16:
-; X64:       # %bb.0:
-; X64-NEXT:    notw (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: not_16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    notw (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = xor i16 %1, -1
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @not_32(i32* %p) {
-; X64-LABEL: not_32:
-; X64:       # %bb.0:
-; X64-NEXT:    notl (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: not_32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    notl (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = xor i32 %1, -1
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-define void @not_64(i64* %p) {
-; X64-LABEL: not_64:
-; X64:       # %bb.0:
-; X64-NEXT:    notq (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: not_64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    movl %edx, %ecx
-; X32-NEXT:    notl %ecx
-; X32-NEXT:    notl %ebx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB68_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB68_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'notq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = xor i64 %1, -1
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @not_32_seq_cst(i32* %p) {
-; X64-LABEL: not_32_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    notl %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: not_32_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    notl %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = xor i32 %1, -1
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- NEG -----
-
-define void @neg_8(i8* %p) {
-; X64-LABEL: neg_8:
-; X64:       # %bb.0:
-; X64-NEXT:    negb (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: neg_8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    negb (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i8, i8* %p seq_cst, align 1
-  %2 = sub i8 0, %1
-  store atomic i8 %2, i8* %p release, align 1
-  ret void
-}
-
-define void @neg_16(i16* %p) {
-; X64-LABEL: neg_16:
-; X64:       # %bb.0:
-; X64-NEXT:    movzwl (%rdi), %eax
-; X64-NEXT:    negl %eax
-; X64-NEXT:    movw %ax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: neg_16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movzwl (%eax), %ecx
-; X32-NEXT:    negl %ecx
-; X32-NEXT:    movw %cx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i16, i16* %p acquire, align 2
-  %2 = sub i16 0, %1
-  store atomic i16 %2, i16* %p release, align 2
-  ret void
-}
-
-define void @neg_32(i32* %p) {
-; X64-LABEL: neg_32:
-; X64:       # %bb.0:
-; X64-NEXT:    negl (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: neg_32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    negl (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p acquire, align 4
-  %2 = sub i32 0, %1
-  store atomic i32 %2, i32* %p monotonic, align 4
-  ret void
-}
-
-define void @neg_64(i64* %p) {
-; X64-LABEL: neg_64:
-; X64:       # %bb.0:
-; X64-NEXT:    negq (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: neg_64:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 16
-; X32-NEXT:    .cfi_offset %esi, -16
-; X32-NEXT:    .cfi_offset %edi, -12
-; X32-NEXT:    .cfi_offset %ebx, -8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT:    xorl %esi, %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%edi)
-; X32-NEXT:    movl %eax, %ebx
-; X32-NEXT:    negl %ebx
-; X32-NEXT:    sbbl %edx, %esi
-; X32-NEXT:    movl (%edi), %eax
-; X32-NEXT:    movl 4(%edi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB73_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    movl %esi, %ecx
-; X32-NEXT:    lock cmpxchg8b (%edi)
-; X32-NEXT:    jne .LBB73_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    popl %edi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-;   We do not check X86-32 as it cannot do 'negq'.
-  %1 = load atomic i64, i64* %p acquire, align 8
-  %2 = sub i64 0, %1
-  store atomic i64 %2, i64* %p release, align 8
-  ret void
-}
-
-define void @neg_32_seq_cst(i32* %p) {
-; X64-LABEL: neg_32_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    negl %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: neg_32_seq_cst:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    negl %ecx
-; X32-NEXT:    xchgl %ecx, (%eax)
-; X32-NEXT:    retl
-  %1 = load atomic i32, i32* %p monotonic, align 4
-  %2 = sub i32 0, %1
-  store atomic i32 %2, i32* %p seq_cst, align 4
-  ret void
-}
-
-; ----- FADD -----
-
-define void @fadd_32r(float* %loc, float %val) {
-; X64-LABEL: fadd_32r:
-; X64:       # %bb.0:
-; X64-NEXT:    addss (%rdi), %xmm0
-; X64-NEXT:    movss %xmm0, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_32r:
-; X32:       # %bb.0:
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %ecx
-; X32-NEXT:    movl %ecx, (%esp)
-; X32-NEXT:    flds (%esp)
-; X32-NEXT:    fadds {{[0-9]+}}(%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl %ecx, (%eax)
-; X32-NEXT:    addl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-; Don't check x86-32.
-; LLVM's SSE handling is conservative on x86-32 even without using atomics.
-  %floc = bitcast float* %loc to i32*
-  %1 = load atomic i32, i32* %floc seq_cst, align 4
-  %2 = bitcast i32 %1 to float
-  %add = fadd float %2, %val
-  %3 = bitcast float %add to i32
-  store atomic i32 %3, i32* %floc release, align 4
-  ret void
-}
-
-define void @fadd_64r(double* %loc, double %val) {
-; X64-LABEL: fadd_64r:
-; X64:       # %bb.0:
-; X64-NEXT:    addsd (%rdi), %xmm0
-; X64-NEXT:    movsd %xmm0, (%rdi)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_64r:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $16, %esp
-; X32-NEXT:    .cfi_offset %esi, -16
-; X32-NEXT:    .cfi_offset %ebx, -12
-; X32-NEXT:    movl 8(%ebp), %esi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    fldl {{[0-9]+}}(%esp)
-; X32-NEXT:    faddl 12(%ebp)
-; X32-NEXT:    fstpl (%esp)
-; X32-NEXT:    movl (%esp), %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%esi), %eax
-; X32-NEXT:    movl 4(%esi), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB76_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esi)
-; X32-NEXT:    jne .LBB76_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    leal -8(%ebp), %esp
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %floc = bitcast double* %loc to i64*
-  %1 = load atomic i64, i64* %floc seq_cst, align 8
-  %2 = bitcast i64 %1 to double
-  %add = fadd double %2, %val
-  %3 = bitcast double %add to i64
-  store atomic i64 %3, i64* %floc release, align 8
-  ret void
-}
-
-@glob32 = global float 0.000000e+00, align 4
-@glob64 = global double 0.000000e+00, align 8
-
-; Floating-point add to a global using an immediate.
-define void @fadd_32g() {
-; X64-LABEL: fadd_32g:
-; X64:       # %bb.0:
-; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-NEXT:    addss {{.*}}(%rip), %xmm0
-; X64-NEXT:    movss %xmm0, {{.*}}(%rip)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_32g:
-; X32:       # %bb.0:
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    movl glob32, %eax
-; X32-NEXT:    movl %eax, (%esp)
-; X32-NEXT:    fld1
-; X32-NEXT:    fadds (%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl %eax, glob32
-; X32-NEXT:    addl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %i = load atomic i32, i32* bitcast (float* @glob32 to i32*) monotonic, align 4
-  %f = bitcast i32 %i to float
-  %add = fadd float %f, 1.000000e+00
-  %s = bitcast float %add to i32
-  store atomic i32 %s, i32* bitcast (float* @glob32 to i32*) monotonic, align 4
-  ret void
-}
-
-define void @fadd_64g() {
-; X64-LABEL: fadd_64g:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    addsd {{.*}}(%rip), %xmm0
-; X64-NEXT:    movsd %xmm0, {{.*}}(%rip)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_64g:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $24, %esp
-; X32-NEXT:    .cfi_offset %ebx, -12
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b glob64
-; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    fld1
-; X32-NEXT:    faddl {{[0-9]+}}(%esp)
-; X32-NEXT:    fstpl (%esp)
-; X32-NEXT:    movl (%esp), %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl glob64+4, %edx
-; X32-NEXT:    movl glob64, %eax
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB78_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b glob64
-; X32-NEXT:    jne .LBB78_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    leal -4(%ebp), %esp
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %i = load atomic i64, i64* bitcast (double* @glob64 to i64*) monotonic, align 8
-  %f = bitcast i64 %i to double
-  %add = fadd double %f, 1.000000e+00
-  %s = bitcast double %add to i64
-  store atomic i64 %s, i64* bitcast (double* @glob64 to i64*) monotonic, align 8
-  ret void
-}
-
-; Floating-point add to a hard-coded immediate location using an immediate.
-define void @fadd_32imm() {
-; X64-LABEL: fadd_32imm:
-; X64:       # %bb.0:
-; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-NEXT:    movl $3735928559, %eax # imm = 0xDEADBEEF
-; X64-NEXT:    addss (%rax), %xmm0
-; X64-NEXT:    movss %xmm0, (%rax)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_32imm:
-; X32:       # %bb.0:
-; X32-NEXT:    subl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 12
-; X32-NEXT:    movl -559038737, %eax
-; X32-NEXT:    movl %eax, (%esp)
-; X32-NEXT:    fld1
-; X32-NEXT:    fadds (%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl %eax, -559038737
-; X32-NEXT:    addl $8, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %i = load atomic i32, i32* inttoptr (i32 3735928559 to i32*) monotonic, align 4
-  %f = bitcast i32 %i to float
-  %add = fadd float %f, 1.000000e+00
-  %s = bitcast float %add to i32
-  store atomic i32 %s, i32* inttoptr (i32 3735928559 to i32*) monotonic, align 4
-  ret void
-}
-
-define void @fadd_64imm() {
-; X64-LABEL: fadd_64imm:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    movl $3735928559, %eax # imm = 0xDEADBEEF
-; X64-NEXT:    addsd (%rax), %xmm0
-; X64-NEXT:    movsd %xmm0, (%rax)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_64imm:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $24, %esp
-; X32-NEXT:    .cfi_offset %ebx, -12
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b -559038737
-; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    fld1
-; X32-NEXT:    faddl {{[0-9]+}}(%esp)
-; X32-NEXT:    fstpl (%esp)
-; X32-NEXT:    movl (%esp), %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl -559038737, %eax
-; X32-NEXT:    movl -559038733, %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB80_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b -559038737
-; X32-NEXT:    jne .LBB80_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    leal -4(%ebp), %esp
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %i = load atomic i64, i64* inttoptr (i64 3735928559 to i64*) monotonic, align 8
-  %f = bitcast i64 %i to double
-  %add = fadd double %f, 1.000000e+00
-  %s = bitcast double %add to i64
-  store atomic i64 %s, i64* inttoptr (i64 3735928559 to i64*) monotonic, align 8
-  ret void
-}
-
-; Floating-point add to a stack location.
-define void @fadd_32stack() {
-; X64-LABEL: fadd_32stack:
-; X64:       # %bb.0:
-; X64-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-NEXT:    addss -{{[0-9]+}}(%rsp), %xmm0
-; X64-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_32stack:
-; X32:       # %bb.0:
-; X32-NEXT:    subl $12, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 16
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl %eax, (%esp)
-; X32-NEXT:    fld1
-; X32-NEXT:    fadds (%esp)
-; X32-NEXT:    fstps {{[0-9]+}}(%esp)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    addl $12, %esp
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %ptr = alloca i32, align 4
-  %bc3 = bitcast i32* %ptr to float*
-  %load = load atomic i32, i32* %ptr acquire, align 4
-  %bc0 = bitcast i32 %load to float
-  %fadd = fadd float 1.000000e+00, %bc0
-  %bc1 = bitcast float %fadd to i32
-  store atomic i32 %bc1, i32* %ptr release, align 4
-  ret void
-}
-
-define void @fadd_64stack() {
-; X64-LABEL: fadd_64stack:
-; X64:       # %bb.0:
-; X64-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT:    addsd -{{[0-9]+}}(%rsp), %xmm0
-; X64-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_64stack:
-; X32:       # %bb.0:
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $32, %esp
-; X32-NEXT:    .cfi_offset %ebx, -12
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%esp)
-; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    fld1
-; X32-NEXT:    faddl {{[0-9]+}}(%esp)
-; X32-NEXT:    fstpl {{[0-9]+}}(%esp)
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB82_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%esp)
-; X32-NEXT:    jne .LBB82_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    leal -4(%ebp), %esp
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-  %ptr = alloca i64, align 8
-  %bc3 = bitcast i64* %ptr to double*
-  %load = load atomic i64, i64* %ptr acquire, align 8
-  %bc0 = bitcast i64 %load to double
-  %fadd = fadd double 1.000000e+00, %bc0
-  %bc1 = bitcast double %fadd to i64
-  store atomic i64 %bc1, i64* %ptr release, align 8
-  ret void
-}
-
-define void @fadd_array(i64* %arg, double %arg1, i64 %arg2) {
-; X64-LABEL: fadd_array:
-; X64:       # %bb.0: # %bb
-; X64-NEXT:    addsd (%rdi,%rsi,8), %xmm0
-; X64-NEXT:    movsd %xmm0, (%rdi,%rsi,8)
-; X64-NEXT:    retq
-;
-; X32-LABEL: fadd_array:
-; X32:       # %bb.0: # %bb
-; X32-NEXT:    pushl %ebp
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %ebp, -8
-; X32-NEXT:    movl %esp, %ebp
-; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %ebx
-; X32-NEXT:    pushl %edi
-; X32-NEXT:    pushl %esi
-; X32-NEXT:    andl $-8, %esp
-; X32-NEXT:    subl $24, %esp
-; X32-NEXT:    .cfi_offset %esi, -20
-; X32-NEXT:    .cfi_offset %edi, -16
-; X32-NEXT:    .cfi_offset %ebx, -12
-; X32-NEXT:    movl 20(%ebp), %esi
-; X32-NEXT:    movl 8(%ebp), %edi
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    xorl %ebx, %ebx
-; X32-NEXT:    lock cmpxchg8b (%edi,%esi,8)
-; X32-NEXT:    movl %edx, {{[0-9]+}}(%esp)
-; X32-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    fldl {{[0-9]+}}(%esp)
-; X32-NEXT:    faddl 12(%ebp)
-; X32-NEXT:    fstpl (%esp)
-; X32-NEXT:    movl (%esp), %ebx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movl (%edi,%esi,8), %eax
-; X32-NEXT:    movl 4(%edi,%esi,8), %edx
-; X32-NEXT:    .p2align 4, 0x90
-; X32-NEXT:  .LBB83_1: # %atomicrmw.start
-; X32-NEXT:    # =>This Inner Loop Header: Depth=1
-; X32-NEXT:    lock cmpxchg8b (%edi,%esi,8)
-; X32-NEXT:    jne .LBB83_1
-; X32-NEXT:  # %bb.2: # %atomicrmw.end
-; X32-NEXT:    leal -12(%ebp), %esp
-; X32-NEXT:    popl %esi
-; X32-NEXT:    popl %edi
-; X32-NEXT:    popl %ebx
-; X32-NEXT:    popl %ebp
-; X32-NEXT:    .cfi_def_cfa %esp, 4
-; X32-NEXT:    retl
-; Don't check x86-32 (see comment above).
-bb:
-  %tmp4 = getelementptr inbounds i64, i64* %arg, i64 %arg2
-  %tmp6 = load atomic i64, i64* %tmp4 monotonic, align 8
-  %tmp7 = bitcast i64 %tmp6 to double
-  %tmp8 = fadd double %tmp7, %arg1
-  %tmp9 = bitcast double %tmp8 to i64
-  store atomic i64 %tmp9, i64* %tmp4 monotonic, align 8
-  ret void
-}

Removed: llvm/trunk/test/CodeGen/X86/atomic_op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic_op.ll?rev=353955&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic_op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic_op.ll (removed)
@@ -1,141 +0,0 @@
-; RUN: llc < %s -mcpu=generic -mtriple=i686-- -mattr=+cmov,cx16 -verify-machineinstrs | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-
-define void @func(i32 %argc, i8** %argv) nounwind {
-entry:
-	%argc.addr = alloca i32		; <i32*> [#uses=1]
-	%argv.addr = alloca i8**		; <i8***> [#uses=1]
-	%val1 = alloca i32		; <i32*> [#uses=2]
-	%val2 = alloca i32		; <i32*> [#uses=15]
-	%andt = alloca i32		; <i32*> [#uses=2]
-	%ort = alloca i32		; <i32*> [#uses=2]
-	%xort = alloca i32		; <i32*> [#uses=2]
-	%old = alloca i32		; <i32*> [#uses=18]
-	%temp = alloca i32		; <i32*> [#uses=2]
-	%temp64 = alloca i64
-	store i32 %argc, i32* %argc.addr
-	store i8** %argv, i8*** %argv.addr
-	store i32 0, i32* %val1
-	store i32 31, i32* %val2
-	store i32 3855, i32* %andt
-	store i32 3855, i32* %ort
-	store i32 3855, i32* %xort
-	store i32 4, i32* %temp
-	%tmp = load i32, i32* %temp
-        ; CHECK: lock
-        ; CHECK: xaddl
-  %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
-	store i32 %0, i32* %old
-        ; CHECK: lock
-        ; CHECK: xaddl
-  %1 = atomicrmw sub i32* %val2, i32 30 monotonic
-	store i32 %1, i32* %old
-        ; CHECK: lock
-        ; CHECK: xaddl
-  %2 = atomicrmw add i32* %val2, i32 1 monotonic
-	store i32 %2, i32* %old
-        ; CHECK: lock
-        ; CHECK: xaddl
-  %3 = atomicrmw sub i32* %val2, i32 1 monotonic
-	store i32 %3, i32* %old
-        ; CHECK: andl
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %4 = atomicrmw and i32* %andt, i32 4080 monotonic
-	store i32 %4, i32* %old
-        ; CHECK: orl
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %5 = atomicrmw or i32* %ort, i32 4080 monotonic
-	store i32 %5, i32* %old
-        ; CHECK: xorl
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
-	store i32 %6, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %7 = atomicrmw min i32* %val2, i32 16 monotonic
-	store i32 %7, i32* %old
-	%neg = sub i32 0, 1		; <i32> [#uses=1]
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %8 = atomicrmw min i32* %val2, i32 %neg monotonic
-	store i32 %8, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %9 = atomicrmw max i32* %val2, i32 1 monotonic
-	store i32 %9, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %10 = atomicrmw max i32* %val2, i32 0 monotonic
-	store i32 %10, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %11 = atomicrmw umax i32* %val2, i32 65535 monotonic
-	store i32 %11, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %12 = atomicrmw umax i32* %val2, i32 10 monotonic
-	store i32 %12, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %13 = atomicrmw umin i32* %val2, i32 1 monotonic
-	store i32 %13, i32* %old
-        ; CHECK: cmov
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %14 = atomicrmw umin i32* %val2, i32 10 monotonic
-	store i32 %14, i32* %old
-        ; CHECK: xchgl   %{{.*}}, {{.*}}(%esp)
-  %15 = atomicrmw xchg i32* %val2, i32 1976 monotonic
-	store i32 %15, i32* %old
-	%neg1 = sub i32 0, 10		; <i32> [#uses=1]
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %pair16 = cmpxchg i32* %val2, i32 %neg1, i32 1 monotonic monotonic
-  %16 = extractvalue { i32, i1 } %pair16, 0
-	store i32 %16, i32* %old
-        ; CHECK: lock
-        ; CHECK: cmpxchgl
-  %pair17 = cmpxchg i32* %val2, i32 1976, i32 1 monotonic monotonic
-  %17 = extractvalue { i32, i1 } %pair17, 0
-	store i32 %17, i32* %old
-        ; CHECK: movl  [[R17atomic:.*]], %eax
-        ; CHECK: movl %eax, %[[R17mask:[a-z]*]]
-        ; CHECK: notl %[[R17mask]]
-        ; CHECK: orl $-1402, %[[R17mask]]
-        ; CHECK: lock
-        ; CHECK: cmpxchgl	%[[R17mask]], [[R17atomic]]
-        ; CHECK: jne
-        ; CHECK: movl	%eax,
-  %18 = atomicrmw nand i32* %val2, i32 1401 monotonic
-  store i32 %18, i32* %old
-        ; CHECK: notl
-        ; CHECK: notl
-        ; CHECK: orl $252645135
-        ; CHECK: orl $252645135
-        ; CHECK: lock
-        ; CHECK: cmpxchg8b
-  %19 = atomicrmw nand i64* %temp64, i64 17361641481138401520 monotonic
-  store i64 %19, i64* %temp64
-	ret void
-}
-
-define void @test2(i32 addrspace(256)* nocapture %P) nounwind {
-entry:
-; CHECK: lock
-; CHECK:	cmpxchgl	%{{.*}}, %gs:(%{{.*}})
-
-  %pair0 = cmpxchg i32 addrspace(256)* %P, i32 0, i32 1 monotonic monotonic
-  %0 = extractvalue { i32, i1 } %pair0, 0
-  ret void
-}
