[llvm] e79ef93 - [X86] Rearrange a few atomics tests. NFC.

James Y Knight via llvm-commits <llvm-commits@lists.llvm.org>
Sat Dec 9 18:05:06 PST 2023


Author: James Y Knight
Date: 2023-12-09T20:53:29-05:00
New Revision: e79ef93c8361f78f50f8c37a955c87131f7c60cf

URL: https://github.com/llvm/llvm-project/commit/e79ef93c8361f78f50f8c37a955c87131f7c60cf
DIFF: https://github.com/llvm/llvm-project/commit/e79ef93c8361f78f50f8c37a955c87131f7c60cf.diff

LOG: [X86] Rearrange a few atomics tests. NFC.

Added: 
    llvm/test/CodeGen/X86/atomic-nocx16.ll

Modified: 
    llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
    llvm/test/CodeGen/X86/atomic-non-integer.ll
    llvm/test/CodeGen/X86/atomic128.ll

Removed: 
    llvm/test/CodeGen/X86/atomicf128.ll
    llvm/test/CodeGen/X86/nocx16.ll


################################################################################
diff --git a/llvm/test/CodeGen/X86/atomic-nocx16.ll b/llvm/test/CodeGen/X86/atomic-nocx16.ll
new file mode 100644
index 00000000000000..5677541242a249
--- /dev/null
+++ b/llvm/test/CodeGen/X86/atomic-nocx16.ll
@@ -0,0 +1,49 @@
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=corei7 -mattr=-cx16 | FileCheck %s
+; RUN: llc < %s -mtriple=i386-linux-gnu -verify-machineinstrs -mattr=cx16 | FileCheck -check-prefix=CHECK %s
+
+;; Verify that 128-bit atomics emit a libcall when cx16 is not
+;; available.
+;;
+;; We test 32-bit mode with -mattr=cx16 because the attribute should
+;; have no effect in 32-bit mode.
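+;; (Neither configuration can use the cmpxchg16b instruction: it
+;; requires the cx16 feature on x86-64 and does not exist at all in
+;; 32-bit mode, so both fall back to the __sync_* libcalls checked
+;; below.)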
+
+; CHECK-LABEL: test:
+define void @test(ptr %a) nounwind {
+entry:
+; CHECK: __sync_val_compare_and_swap_16
+  %0 = cmpxchg ptr %a, i128 1, i128 1 seq_cst seq_cst
+; CHECK: __sync_lock_test_and_set_16
+  %1 = atomicrmw xchg ptr %a, i128 1 seq_cst
+; CHECK: __sync_fetch_and_add_16
+  %2 = atomicrmw add ptr %a, i128 1 seq_cst
+; CHECK: __sync_fetch_and_sub_16
+  %3 = atomicrmw sub ptr %a, i128 1 seq_cst
+; CHECK: __sync_fetch_and_and_16
+  %4 = atomicrmw and ptr %a, i128 1 seq_cst
+; CHECK: __sync_fetch_and_nand_16
+  %5 = atomicrmw nand ptr %a, i128 1 seq_cst
+; CHECK: __sync_fetch_and_or_16
+  %6 = atomicrmw or ptr %a, i128 1 seq_cst
+; CHECK: __sync_fetch_and_xor_16
+  %7 = atomicrmw xor ptr %a, i128 1 seq_cst
+; CHECK: __sync_val_compare_and_swap_16
+  %8 = load atomic i128, ptr %a seq_cst, align 16
+; CHECK: __sync_lock_test_and_set_16
+  store atomic i128 %8, ptr %a seq_cst, align 16
+  ret void
+}
+
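+;; The same lowering applies to fp128: the exchange, load, and store
+;; below should each lower to a 16-byte __sync libcall.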
+; CHECK-LABEL: test_fp:
+define void @test_fp(ptr %a) nounwind {
+entry:
+; CHECK: __sync_lock_test_and_set_16
+  %0 = atomicrmw xchg ptr %a, fp128 0xL00000000000000004000900000000000 seq_cst
+; These currently fail to compile:
+;  %1 = atomicrmw fadd ptr %a, fp128 0xL00000000000000004000900000000000 seq_cst
+;  %2 = atomicrmw fsub ptr %a, fp128 0xL00000000000000004000900000000000 seq_cst
+; CHECK: __sync_val_compare_and_swap_16
+  %1 = load atomic fp128, ptr %a seq_cst, align 16
+; CHECK: __sync_lock_test_and_set_16
+  store atomic fp128 %1, ptr %a seq_cst, align 16
+  ret void
+}

diff --git a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
index 6c4d112330f2e9..9555c45086d6fe 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
@@ -1,35 +1,139 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=-sse | FileCheck %s --check-prefix=X64-NOSSE
-; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs | FileCheck %s --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2,cx16 | FileCheck %s --check-prefixes=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=avx,cx16 | FileCheck %s --check-prefixes=X64-AVX
+; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=avx512f,cx16 | FileCheck %s --check-prefixes=X64-AVX
 
-; Note: This test is testing that the lowering for atomics matches what we
-; currently emit for non-atomics + the atomic restriction.  The presence of
-; particular lowering detail in these tests should not be read as requiring
-; that detail for correctness unless it's related to the atomicity itself.
-; (Specifically, there were reviewer questions about the lowering for halfs
-;  and their calling convention which remain unresolved.)
+; Codegen of fp128 without cx16 is tested in atomic-nocx16.ll
 
 define void @store_fp128(ptr %fptr, fp128 %v) {
-; X64-NOSSE-LABEL: store_fp128:
-; X64-NOSSE:       # %bb.0:
-; X64-NOSSE-NEXT:    pushq %rax
-; X64-NOSSE-NEXT:    .cfi_def_cfa_offset 16
-; X64-NOSSE-NEXT:    callq __sync_lock_test_and_set_16@PLT
-; X64-NOSSE-NEXT:    popq %rax
-; X64-NOSSE-NEXT:    .cfi_def_cfa_offset 8
-; X64-NOSSE-NEXT:    retq
-;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
-; X64-SSE-NEXT:    movq (%rsp), %rsi
-; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16@PLT
-; X64-SSE-NEXT:    addq $24, %rsp
+; X64-SSE-NEXT:    pushq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 16
+; X64-SSE-NEXT:    .cfi_offset %rbx, -16
+; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rbx
+; X64-SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; X64-SSE-NEXT:    movq (%rdi), %rax
+; X64-SSE-NEXT:    movq 8(%rdi), %rdx
+; X64-SSE-NEXT:    .p2align 4, 0x90
+; X64-SSE-NEXT:  .LBB0_1: # %atomicrmw.start
+; X64-SSE-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-SSE-NEXT:    lock cmpxchg16b (%rdi)
+; X64-SSE-NEXT:    jne .LBB0_1
+; X64-SSE-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-SSE-NEXT:    popq %rbx
 ; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
 ; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: store_fp128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    pushq %rbx
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 16
+; X64-AVX-NEXT:    .cfi_offset %rbx, -16
+; X64-AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rbx
+; X64-AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; X64-AVX-NEXT:    movq (%rdi), %rax
+; X64-AVX-NEXT:    movq 8(%rdi), %rdx
+; X64-AVX-NEXT:    .p2align 4, 0x90
+; X64-AVX-NEXT:  .LBB0_1: # %atomicrmw.start
+; X64-AVX-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-AVX-NEXT:    lock cmpxchg16b (%rdi)
+; X64-AVX-NEXT:    jne .LBB0_1
+; X64-AVX-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-AVX-NEXT:    popq %rbx
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    retq
   store atomic fp128 %v, ptr %fptr unordered, align 16
   ret void
 }
+
+define fp128 @load_fp128(ptr %fptr) {
+; X64-SSE-LABEL: load_fp128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pushq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 16
+; X64-SSE-NEXT:    .cfi_offset %rbx, -16
+; X64-SSE-NEXT:    xorl %eax, %eax
+; X64-SSE-NEXT:    xorl %edx, %edx
+; X64-SSE-NEXT:    xorl %ecx, %ecx
+; X64-SSE-NEXT:    xorl %ebx, %ebx
+; X64-SSE-NEXT:    lock cmpxchg16b (%rdi)
+; X64-SSE-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT:    popq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load_fp128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    pushq %rbx
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 16
+; X64-AVX-NEXT:    .cfi_offset %rbx, -16
+; X64-AVX-NEXT:    xorl %eax, %eax
+; X64-AVX-NEXT:    xorl %edx, %edx
+; X64-AVX-NEXT:    xorl %ecx, %ecx
+; X64-AVX-NEXT:    xorl %ebx, %ebx
+; X64-AVX-NEXT:    lock cmpxchg16b (%rdi)
+; X64-AVX-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; X64-AVX-NEXT:    popq %rbx
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    retq
+  %v = load atomic fp128, ptr %fptr unordered, align 16
+  ret fp128 %v
+}
+
+define fp128 @exchange_fp128(ptr %fptr, fp128 %x) {
+; X64-SSE-LABEL: exchange_fp128:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pushq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 16
+; X64-SSE-NEXT:    .cfi_offset %rbx, -16
+; X64-SSE-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rbx
+; X64-SSE-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; X64-SSE-NEXT:    movq (%rdi), %rax
+; X64-SSE-NEXT:    movq 8(%rdi), %rdx
+; X64-SSE-NEXT:    .p2align 4, 0x90
+; X64-SSE-NEXT:  .LBB2_1: # %atomicrmw.start
+; X64-SSE-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-SSE-NEXT:    lock cmpxchg16b (%rdi)
+; X64-SSE-NEXT:    jne .LBB2_1
+; X64-SSE-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-SSE-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movaps -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT:    popq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: exchange_fp128:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    pushq %rbx
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 16
+; X64-AVX-NEXT:    .cfi_offset %rbx, -16
+; X64-AVX-NEXT:    vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rbx
+; X64-AVX-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
+; X64-AVX-NEXT:    movq (%rdi), %rax
+; X64-AVX-NEXT:    movq 8(%rdi), %rdx
+; X64-AVX-NEXT:    .p2align 4, 0x90
+; X64-AVX-NEXT:  .LBB2_1: # %atomicrmw.start
+; X64-AVX-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-AVX-NEXT:    lock cmpxchg16b (%rdi)
+; X64-AVX-NEXT:    jne .LBB2_1
+; X64-AVX-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-AVX-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    movq %rdx, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    vmovaps -{{[0-9]+}}(%rsp), %xmm0
+; X64-AVX-NEXT:    popq %rbx
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    retq
+  %v = atomicrmw xchg ptr %fptr, fp128 %x monotonic, align 16
+  ret fp128 %v
+}
+

diff --git a/llvm/test/CodeGen/X86/atomic-non-integer.ll b/llvm/test/CodeGen/X86/atomic-non-integer.ll
index 7d2810e57a25b5..9e6f584d83112f 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=sse | FileCheck %s --check-prefixes=X86,X86-SSE,X86-SSE1
-; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE,X86-SSE2
+; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=sse | FileCheck %s --check-prefixes=X86,X86-SSE1
+; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
 ; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=avx | FileCheck %s --check-prefixes=X86,X86-AVX
 ; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=avx512f | FileCheck %s --check-prefixes=X86,X86-AVX
 ; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs | FileCheck %s --check-prefixes=X86,X86-NOSSE
@@ -131,94 +131,6 @@ define void @store_double(ptr %fptr, double %v) {
   ret void
 }
 
-define void @store_fp128(ptr %fptr, fp128 %v) {
-; X86-SSE-LABEL: store_fp128:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    subl $36, %esp
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 36
-; X86-SSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE-NEXT:    pushl %eax
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE-NEXT:    calll __sync_lock_test_and_set_16
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-SSE-NEXT:    addl $56, %esp
-; X86-SSE-NEXT:    .cfi_adjust_cfa_offset -56
-; X86-SSE-NEXT:    retl
-;
-; X86-AVX-LABEL: store_fp128:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    subl $60, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 64
-; X86-AVX-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT:    vmovups %xmm0, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl %eax, (%esp)
-; X86-AVX-NEXT:    calll __sync_lock_test_and_set_16
-; X86-AVX-NEXT:    addl $56, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
-; X86-AVX-NEXT:    retl
-;
-; X86-NOSSE-LABEL: store_fp128:
-; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    subl $36, %esp
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 36
-; X86-NOSSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl %eax
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    calll __sync_lock_test_and_set_16
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-NOSSE-NEXT:    addl $56, %esp
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -56
-; X86-NOSSE-NEXT:    retl
-;
-; X64-SSE-LABEL: store_fp128:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
-; X64-SSE-NEXT:    movq (%rsp), %rsi
-; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16@PLT
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
-; X64-SSE-NEXT:    retq
-;
-; X64-AVX-LABEL: store_fp128:
-; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    subq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
-; X64-AVX-NEXT:    vmovaps %xmm0, (%rsp)
-; X64-AVX-NEXT:    movq (%rsp), %rsi
-; X64-AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-AVX-NEXT:    callq __sync_lock_test_and_set_16@PLT
-; X64-AVX-NEXT:    addq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
-; X64-AVX-NEXT:    retq
-  store atomic fp128 %v, ptr %fptr unordered, align 16
-  ret void
-}
 
 define half @load_half(ptr %fptr) {
 ; X86-SSE1-LABEL: load_half:
@@ -393,220 +305,273 @@ define double @load_double(ptr %fptr) {
   ret double %v
 }
 
-define fp128 @load_fp128(ptr %fptr) {
-; X86-SSE1-LABEL: load_fp128:
+define half @exchange_half(ptr %fptr, half %x) {
+; X86-SSE1-LABEL: exchange_half:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    xchgw %ax, (%ecx)
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: exchange_half:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    xchgw %cx, (%eax)
+; X86-SSE2-NEXT:    pinsrw $0, %ecx, %xmm0
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: exchange_half:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    xchgw %cx, (%eax)
+; X86-AVX-NEXT:    vpinsrw $0, %ecx, %xmm0, %xmm0
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: exchange_half:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    xchgw %ax, (%ecx)
+; X86-NOSSE-NEXT:    retl
+;
+; X64-SSE-LABEL: exchange_half:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pextrw $0, %xmm0, %eax
+; X64-SSE-NEXT:    xchgw %ax, (%rdi)
+; X64-SSE-NEXT:    pinsrw $0, %eax, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: exchange_half:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vpextrw $0, %xmm0, %eax
+; X64-AVX-NEXT:    xchgw %ax, (%rdi)
+; X64-AVX-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
+; X64-AVX-NEXT:    retq
+  %v = atomicrmw xchg ptr %fptr, half %x monotonic, align 2
+  ret half %v
+}
+
+define float @exchange_float(ptr %fptr, float %x) {
+; X86-SSE1-LABEL: exchange_float:
 ; X86-SSE1:       # %bb.0:
-; X86-SSE1-NEXT:    pushl %edi
+; X86-SSE1-NEXT:    pushl %eax
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    xchgl %ecx, (%eax)
+; X86-SSE1-NEXT:    movl %ecx, (%esp)
+; X86-SSE1-NEXT:    flds (%esp)
+; X86-SSE1-NEXT:    popl %eax
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: exchange_float:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    pushl %eax
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    xchgl %ecx, (%eax)
+; X86-SSE2-NEXT:    movd %ecx, %xmm0
+; X86-SSE2-NEXT:    movd %xmm0, (%esp)
+; X86-SSE2-NEXT:    flds (%esp)
+; X86-SSE2-NEXT:    popl %eax
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: exchange_float:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    xchgl %ecx, (%eax)
+; X86-AVX-NEXT:    vmovd %ecx, %xmm0
+; X86-AVX-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX-NEXT:    flds (%esp)
+; X86-AVX-NEXT:    popl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: exchange_float:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl %eax
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT:    xchgl %ecx, (%eax)
+; X86-NOSSE-NEXT:    movl %ecx, (%esp)
+; X86-NOSSE-NEXT:    flds (%esp)
+; X86-NOSSE-NEXT:    popl %eax
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT:    retl
+;
+; X64-SSE-LABEL: exchange_float:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %xmm0, %eax
+; X64-SSE-NEXT:    xchgl %eax, (%rdi)
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: exchange_float:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd %xmm0, %eax
+; X64-AVX-NEXT:    xchgl %eax, (%rdi)
+; X64-AVX-NEXT:    vmovd %eax, %xmm0
+; X64-AVX-NEXT:    retq
+  %v = atomicrmw xchg ptr %fptr, float %x monotonic, align 4
+  ret float %v
+}
+
+define double @exchange_double(ptr %fptr, double %x) {
+; X86-SSE1-LABEL: exchange_double:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %ebx
 ; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
 ; X86-SSE1-NEXT:    pushl %esi
 ; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
-; X86-SSE1-NEXT:    subl $20, %esp
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 32
+; X86-SSE1-NEXT:    subl $12, %esp
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 24
 ; X86-SSE1-NEXT:    .cfi_offset %esi, -12
-; X86-SSE1-NEXT:    .cfi_offset %edi, -8
+; X86-SSE1-NEXT:    .cfi_offset %ebx, -8
 ; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-SSE1-NEXT:    subl $8, %esp
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 8
-; X86-SSE1-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl $0
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    pushl %eax
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE1-NEXT:    calll __sync_val_compare_and_swap_16
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-SSE1-NEXT:    addl $44, %esp
-; X86-SSE1-NEXT:    .cfi_adjust_cfa_offset -44
-; X86-SSE1-NEXT:    movl (%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-SSE1-NEXT:    movl %edi, 8(%esi)
-; X86-SSE1-NEXT:    movl %edx, 12(%esi)
-; X86-SSE1-NEXT:    movl %eax, (%esi)
-; X86-SSE1-NEXT:    movl %ecx, 4(%esi)
-; X86-SSE1-NEXT:    movl %esi, %eax
-; X86-SSE1-NEXT:    addl $20, %esp
+; X86-SSE1-NEXT:    movl (%esi), %eax
+; X86-SSE1-NEXT:    movl 4(%esi), %edx
+; X86-SSE1-NEXT:    .p2align 4, 0x90
+; X86-SSE1-NEXT:  .LBB8_1: # %atomicrmw.start
+; X86-SSE1-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-SSE1-NEXT:    lock cmpxchg8b (%esi)
+; X86-SSE1-NEXT:    jne .LBB8_1
+; X86-SSE1-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-SSE1-NEXT:    movl %eax, (%esp)
+; X86-SSE1-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT:    fldl (%esp)
+; X86-SSE1-NEXT:    addl $12, %esp
 ; X86-SSE1-NEXT:    .cfi_def_cfa_offset 12
 ; X86-SSE1-NEXT:    popl %esi
 ; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X86-SSE1-NEXT:    popl %edi
+; X86-SSE1-NEXT:    popl %ebx
 ; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE1-NEXT:    retl $4
+; X86-SSE1-NEXT:    retl
 ;
-; X86-SSE2-LABEL: load_fp128:
+; X86-SSE2-LABEL: exchange_double:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pushl %esi
+; X86-SSE2-NEXT:    pushl %ebx
 ; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT:    subl $24, %esp
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 32
-; X86-SSE2-NEXT:    .cfi_offset %esi, -8
+; X86-SSE2-NEXT:    pushl %esi
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE2-NEXT:    subl $12, %esp
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 24
+; X86-SSE2-NEXT:    .cfi_offset %esi, -12
+; X86-SSE2-NEXT:    .cfi_offset %ebx, -8
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-SSE2-NEXT:    subl $8, %esp
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 8
-; X86-SSE2-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl $0
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    pushl %eax
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-SSE2-NEXT:    calll __sync_val_compare_and_swap_16
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-SSE2-NEXT:    addl $44, %esp
-; X86-SSE2-NEXT:    .cfi_adjust_cfa_offset -44
-; X86-SSE2-NEXT:    movaps (%esp), %xmm0
-; X86-SSE2-NEXT:    movaps %xmm0, (%esi)
-; X86-SSE2-NEXT:    movl %esi, %eax
-; X86-SSE2-NEXT:    addl $24, %esp
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movl (%esi), %eax
+; X86-SSE2-NEXT:    movl 4(%esi), %edx
+; X86-SSE2-NEXT:    .p2align 4, 0x90
+; X86-SSE2-NEXT:  .LBB8_1: # %atomicrmw.start
+; X86-SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-SSE2-NEXT:    lock cmpxchg8b (%esi)
+; X86-SSE2-NEXT:    jne .LBB8_1
+; X86-SSE2-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-SSE2-NEXT:    movd %eax, %xmm0
+; X86-SSE2-NEXT:    movd %edx, %xmm1
+; X86-SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE2-NEXT:    movq %xmm0, (%esp)
+; X86-SSE2-NEXT:    fldl (%esp)
+; X86-SSE2-NEXT:    addl $12, %esp
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 12
 ; X86-SSE2-NEXT:    popl %esi
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT:    popl %ebx
 ; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE2-NEXT:    retl $4
+; X86-SSE2-NEXT:    retl
 ;
-; X86-AVX-LABEL: load_fp128:
+; X86-AVX-LABEL: exchange_double:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    pushl %esi
+; X86-AVX-NEXT:    pushl %ebx
 ; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX-NEXT:    subl $72, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 80
-; X86-AVX-NEXT:    .cfi_offset %esi, -8
+; X86-AVX-NEXT:    pushl %esi
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 12
+; X86-AVX-NEXT:    subl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 24
+; X86-AVX-NEXT:    .cfi_offset %esi, -12
+; X86-AVX-NEXT:    .cfi_offset %ebx, -8
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X86-AVX-NEXT:    vmovups %ymm0, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT:    movl %eax, {{[0-9]+}}(%esp)
-; X86-AVX-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    movl %eax, (%esp)
-; X86-AVX-NEXT:    vzeroupper
-; X86-AVX-NEXT:    calll __sync_val_compare_and_swap_16
-; X86-AVX-NEXT:    subl $4, %esp
-; X86-AVX-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
-; X86-AVX-NEXT:    vmovaps %xmm0, (%esi)
-; X86-AVX-NEXT:    movl %esi, %eax
-; X86-AVX-NEXT:    addl $72, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT:    movl (%esi), %eax
+; X86-AVX-NEXT:    movl 4(%esi), %edx
+; X86-AVX-NEXT:    .p2align 4, 0x90
+; X86-AVX-NEXT:  .LBB8_1: # %atomicrmw.start
+; X86-AVX-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-AVX-NEXT:    lock cmpxchg8b (%esi)
+; X86-AVX-NEXT:    jne .LBB8_1
+; X86-AVX-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-AVX-NEXT:    vmovd %eax, %xmm0
+; X86-AVX-NEXT:    vpinsrd $1, %edx, %xmm0, %xmm0
+; X86-AVX-NEXT:    vmovq %xmm0, (%esp)
+; X86-AVX-NEXT:    fldl (%esp)
+; X86-AVX-NEXT:    addl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 12
 ; X86-AVX-NEXT:    popl %esi
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    popl %ebx
 ; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
-; X86-AVX-NEXT:    retl $4
+; X86-AVX-NEXT:    retl
 ;
-; X86-NOSSE-LABEL: load_fp128:
+; X86-NOSSE-LABEL: exchange_double:
 ; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    pushl %edi
+; X86-NOSSE-NEXT:    pushl %ebx
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NOSSE-NEXT:    pushl %esi
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
-; X86-NOSSE-NEXT:    subl $20, %esp
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 32
+; X86-NOSSE-NEXT:    subl $12, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 24
 ; X86-NOSSE-NEXT:    .cfi_offset %esi, -12
-; X86-NOSSE-NEXT:    .cfi_offset %edi, -8
+; X86-NOSSE-NEXT:    .cfi_offset %ebx, -8
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NOSSE-NEXT:    subl $8, %esp
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 8
-; X86-NOSSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl $0
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    pushl %eax
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NOSSE-NEXT:    calll __sync_val_compare_and_swap_16
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-NOSSE-NEXT:    addl $44, %esp
-; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -44
-; X86-NOSSE-NEXT:    movl (%esp), %eax
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ebx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NOSSE-NEXT:    movl %edi, 8(%esi)
-; X86-NOSSE-NEXT:    movl %edx, 12(%esi)
-; X86-NOSSE-NEXT:    movl %eax, (%esi)
-; X86-NOSSE-NEXT:    movl %ecx, 4(%esi)
-; X86-NOSSE-NEXT:    movl %esi, %eax
-; X86-NOSSE-NEXT:    addl $20, %esp
+; X86-NOSSE-NEXT:    movl (%esi), %eax
+; X86-NOSSE-NEXT:    movl 4(%esi), %edx
+; X86-NOSSE-NEXT:    .p2align 4, 0x90
+; X86-NOSSE-NEXT:  .LBB8_1: # %atomicrmw.start
+; X86-NOSSE-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NOSSE-NEXT:    lock cmpxchg8b (%esi)
+; X86-NOSSE-NEXT:    jne .LBB8_1
+; X86-NOSSE-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NOSSE-NEXT:    movl %eax, (%esp)
+; X86-NOSSE-NEXT:    movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    fldl (%esp)
+; X86-NOSSE-NEXT:    addl $12, %esp
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
 ; X86-NOSSE-NEXT:    popl %esi
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT:    popl %edi
+; X86-NOSSE-NEXT:    popl %ebx
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
-; X86-NOSSE-NEXT:    retl $4
+; X86-NOSSE-NEXT:    retl
 ;
-; X64-SSE-LABEL: load_fp128:
+; X64-SSE-LABEL: exchange_double:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    xorl %esi, %esi
-; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    xorl %ecx, %ecx
-; X64-SSE-NEXT:    xorl %r8d, %r8d
-; X64-SSE-NEXT:    callq __sync_val_compare_and_swap_16@PLT
-; X64-SSE-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
-; X64-SSE-NEXT:    movq %rax, (%rsp)
-; X64-SSE-NEXT:    movaps (%rsp), %xmm0
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movq %xmm0, %rax
+; X64-SSE-NEXT:    xchgq %rax, (%rdi)
+; X64-SSE-NEXT:    movq %rax, %xmm0
 ; X64-SSE-NEXT:    retq
 ;
-; X64-AVX-LABEL: load_fp128:
+; X64-AVX-LABEL: exchange_double:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    subq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
-; X64-AVX-NEXT:    xorl %esi, %esi
-; X64-AVX-NEXT:    xorl %edx, %edx
-; X64-AVX-NEXT:    xorl %ecx, %ecx
-; X64-AVX-NEXT:    xorl %r8d, %r8d
-; X64-AVX-NEXT:    callq __sync_val_compare_and_swap_16@PLT
-; X64-AVX-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
-; X64-AVX-NEXT:    movq %rax, (%rsp)
-; X64-AVX-NEXT:    vmovaps (%rsp), %xmm0
-; X64-AVX-NEXT:    addq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    vmovq %xmm0, %rax
+; X64-AVX-NEXT:    xchgq %rax, (%rdi)
+; X64-AVX-NEXT:    vmovq %rax, %xmm0
 ; X64-AVX-NEXT:    retq
-  %v = load atomic fp128, ptr %fptr unordered, align 16
-  ret fp128 %v
+  %v = atomicrmw xchg ptr %fptr, double %x monotonic, align 8
+  ret double %v
 }
 
 

diff --git a/llvm/test/CodeGen/X86/atomic128.ll b/llvm/test/CodeGen/X86/atomic128.ll
index d5600b54a169d2..1f7c2254bc79fa 100644
--- a/llvm/test/CodeGen/X86/atomic128.ll
+++ b/llvm/test/CodeGen/X86/atomic128.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
-; RUN: llc < %s -mtriple=i386-linux-gnu -verify-machineinstrs -mattr=cx16 | FileCheck %s -check-prefixes=CHECK32
-; RUN: llc < %s -mtriple=i386-linux-gnu -verify-machineinstrs -mattr=-cx16 | FileCheck %s -check-prefixes=CHECK32
+
+; Codegen of i128 without cx16 is tested in atomic-nocx16.ll
 
 @var = global i128 0
 
@@ -20,61 +20,6 @@ define i128 @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: val_compare_and_swap:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    subl $20, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -12
-; CHECK32-NEXT:    .cfi_offset %edi, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_val_compare_and_swap_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $44, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -44
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl %edi, 8(%esi)
-; CHECK32-NEXT:    movl %edx, 12(%esi)
-; CHECK32-NEXT:    movl %eax, (%esi)
-; CHECK32-NEXT:    movl %ecx, 4(%esi)
-; CHECK32-NEXT:    movl %esi, %eax
-; CHECK32-NEXT:    addl $20, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
   %pair = cmpxchg ptr %p, i128 %oldval, i128 %newval acquire acquire
   %val = extractvalue { i128, i1 } %pair, 0
   ret i128 %val
@@ -94,24 +39,6 @@ define void @cmpxchg16b_global_with_offset() nounwind {
 ; CHECK-NEXT:    lock cmpxchg16b _cmpxchg16b_global+16(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: cmpxchg16b_global_with_offset:
-; CHECK32:       # %bb.0: # %entry
-; CHECK32-NEXT:    subl $36, %esp
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    pushl $cmpxchg16b_global+16
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    calll __sync_val_compare_and_swap_16
-; CHECK32-NEXT:    addl $72, %esp
-; CHECK32-NEXT:    retl
 entry:
   %0 = load atomic i128, ptr getelementptr inbounds ({i128, i128}, ptr @cmpxchg16b_global, i64 0, i32 1) acquire, align 16
   ret void
@@ -142,46 +69,6 @@ define void @fetch_and_nand(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_nand:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_nand_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw nand ptr %p, i128 %bits release
   store i128 %val, ptr @var, align 16
   ret void
@@ -210,46 +97,6 @@ define void @fetch_and_or(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_or:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_or_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw or ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -278,46 +125,6 @@ define void @fetch_and_add(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_add:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_add_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw add ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -346,46 +153,6 @@ define void @fetch_and_sub(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_sub:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_sub_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw sub ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -417,46 +184,6 @@ define void @fetch_and_min(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_min:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_min_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw min ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -488,46 +215,6 @@ define void @fetch_and_max(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_max:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_max_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw max ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -559,46 +246,6 @@ define void @fetch_and_umin(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_umin:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_umin_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw umin ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -630,46 +277,6 @@ define void @fetch_and_umax(ptr %p, i128 %bits) {
 ; CHECK-NEXT:    movq %rdx, _var+8(%rip)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: fetch_and_umax:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    subl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -8
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_fetch_and_umax_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $28, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -28
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    movl %esi, var+8
-; CHECK32-NEXT:    movl %edx, var+12
-; CHECK32-NEXT:    movl %eax, var
-; CHECK32-NEXT:    movl %ecx, var+4
-; CHECK32-NEXT:    addl $24, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl
   %val = atomicrmw umax ptr %p, i128 %bits seq_cst
   store i128 %val, ptr @var, align 16
   ret void
@@ -688,61 +295,6 @@ define i128 @atomic_load_seq_cst(ptr %p) {
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: atomic_load_seq_cst:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    subl $20, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -12
-; CHECK32-NEXT:    .cfi_offset %edi, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_val_compare_and_swap_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $44, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -44
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl %edi, 8(%esi)
-; CHECK32-NEXT:    movl %edx, 12(%esi)
-; CHECK32-NEXT:    movl %eax, (%esi)
-; CHECK32-NEXT:    movl %ecx, 4(%esi)
-; CHECK32-NEXT:    movl %esi, %eax
-; CHECK32-NEXT:    addl $20, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
    %r = load atomic i128, ptr %p seq_cst, align 16
    ret i128 %r
 }
@@ -760,61 +312,6 @@ define i128 @atomic_load_relaxed(ptr %p) {
 ; CHECK-NEXT:    lock cmpxchg16b (%rdi)
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: atomic_load_relaxed:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    pushl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    pushl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    subl $20, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 32
-; CHECK32-NEXT:    .cfi_offset %esi, -12
-; CHECK32-NEXT:    .cfi_offset %edi, -8
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; CHECK32-NEXT:    subl $8, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 8
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl $0
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_val_compare_and_swap_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $44, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -44
-; CHECK32-NEXT:    movl (%esp), %eax
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; CHECK32-NEXT:    movl %edi, 8(%esi)
-; CHECK32-NEXT:    movl %edx, 12(%esi)
-; CHECK32-NEXT:    movl %eax, (%esi)
-; CHECK32-NEXT:    movl %ecx, 4(%esi)
-; CHECK32-NEXT:    movl %esi, %eax
-; CHECK32-NEXT:    addl $20, %esp
-; CHECK32-NEXT:    .cfi_def_cfa_offset 12
-; CHECK32-NEXT:    popl %esi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 8
-; CHECK32-NEXT:    popl %edi
-; CHECK32-NEXT:    .cfi_def_cfa_offset 4
-; CHECK32-NEXT:    retl $4
    %r = load atomic i128, ptr %p monotonic, align 16
    ret i128 %r
 }
@@ -837,29 +334,6 @@ define void @atomic_store_seq_cst(ptr %p, i128 %in) {
 ; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: atomic_store_seq_cst:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    subl $36, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 36
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_lock_test_and_set_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $56, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -56
-; CHECK32-NEXT:    retl
    store atomic i128 %in, ptr %p seq_cst, align 16
    ret void
 }
@@ -882,29 +356,6 @@ define void @atomic_store_release(ptr %p, i128 %in) {
 ; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: atomic_store_release:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    subl $36, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 36
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_lock_test_and_set_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $56, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -56
-; CHECK32-NEXT:    retl
    store atomic i128 %in, ptr %p release, align 16
    ret void
 }
@@ -927,29 +378,6 @@ define void @atomic_store_relaxed(ptr %p, i128 %in) {
 ; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    retq
-;
-; CHECK32-LABEL: atomic_store_relaxed:
-; CHECK32:       # %bb.0:
-; CHECK32-NEXT:    subl $36, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 36
-; CHECK32-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl {{[0-9]+}}(%esp)
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    pushl %eax
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset 4
-; CHECK32-NEXT:    calll __sync_lock_test_and_set_16
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -4
-; CHECK32-NEXT:    addl $56, %esp
-; CHECK32-NEXT:    .cfi_adjust_cfa_offset -56
-; CHECK32-NEXT:    retl
    store atomic i128 %in, ptr %p unordered, align 16
    ret void
 }

diff --git a/llvm/test/CodeGen/X86/atomicf128.ll b/llvm/test/CodeGen/X86/atomicf128.ll
deleted file mode 100644
index 3b0bba403aa5d5..00000000000000
--- a/llvm/test/CodeGen/X86/atomicf128.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9 -verify-machineinstrs -mattr=cx16 -mattr=-sse | FileCheck %s --check-prefix=NOSSE
-
-; FIXME: This test has a fatal error in 32-bit mode
-
-@fsc128 = external global fp128
-
-define void @atomic_fetch_swapf128(fp128 %x) nounwind {
-; CHECK-LABEL: atomic_fetch_swapf128:
-; CHECK:       ## %bb.0:
-; CHECK-NEXT:    pushq %rbx
-; CHECK-NEXT:    movaps %xmm0, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rbx
-; CHECK-NEXT:    movq -{{[0-9]+}}(%rsp), %rcx
-; CHECK-NEXT:    movq _fsc128@GOTPCREL(%rip), %rsi
-; CHECK-NEXT:    movq (%rsi), %rax
-; CHECK-NEXT:    movq 8(%rsi), %rdx
-; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  LBB0_1: ## %atomicrmw.start
-; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    lock cmpxchg16b (%rsi)
-; CHECK-NEXT:    jne LBB0_1
-; CHECK-NEXT:  ## %bb.2: ## %atomicrmw.end
-; CHECK-NEXT:    popq %rbx
-; CHECK-NEXT:    retq
-;
-; NOSSE-LABEL: atomic_fetch_swapf128:
-; NOSSE:       ## %bb.0:
-; NOSSE-NEXT:    pushq %rbx
-; NOSSE-NEXT:    movq %rsi, %rcx
-; NOSSE-NEXT:    movq %rdi, %rbx
-; NOSSE-NEXT:    movq _fsc128@GOTPCREL(%rip), %rsi
-; NOSSE-NEXT:    movq (%rsi), %rax
-; NOSSE-NEXT:    movq 8(%rsi), %rdx
-; NOSSE-NEXT:    .p2align 4, 0x90
-; NOSSE-NEXT:  LBB0_1: ## %atomicrmw.start
-; NOSSE-NEXT:    ## =>This Inner Loop Header: Depth=1
-; NOSSE-NEXT:    lock cmpxchg16b (%rsi)
-; NOSSE-NEXT:    jne LBB0_1
-; NOSSE-NEXT:  ## %bb.2: ## %atomicrmw.end
-; NOSSE-NEXT:    popq %rbx
-; NOSSE-NEXT:    retq
-  %t1 = atomicrmw xchg ptr @fsc128, fp128 %x acquire
-  ret void
-}

diff --git a/llvm/test/CodeGen/X86/nocx16.ll b/llvm/test/CodeGen/X86/nocx16.ll
deleted file mode 100644
index ec8e6b2c8c6ac0..00000000000000
--- a/llvm/test/CodeGen/X86/nocx16.ll
+++ /dev/null
@@ -1,21 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=corei7 -mattr=-cx16 | FileCheck %s
-define void @test(ptr %a) nounwind {
-entry:
-; CHECK: __sync_val_compare_and_swap_16
-  %0 = cmpxchg ptr %a, i128 1, i128 1 seq_cst seq_cst
-; CHECK: __sync_lock_test_and_set_16
-  %1 = atomicrmw xchg ptr %a, i128 1 seq_cst
-; CHECK: __sync_fetch_and_add_16
-  %2 = atomicrmw add ptr %a, i128 1 seq_cst
-; CHECK: __sync_fetch_and_sub_16
-  %3 = atomicrmw sub ptr %a, i128 1 seq_cst
-; CHECK: __sync_fetch_and_and_16
-  %4 = atomicrmw and ptr %a, i128 1 seq_cst
-; CHECK: __sync_fetch_and_nand_16
-  %5 = atomicrmw nand ptr %a, i128 1 seq_cst
-; CHECK: __sync_fetch_and_or_16
-  %6 = atomicrmw or ptr %a, i128 1 seq_cst
-; CHECK: __sync_fetch_and_xor_16
-  %7 = atomicrmw xor ptr %a, i128 1 seq_cst
-  ret void
-}

