[llvm] r357881 - [X86] Add avx and avx512f command lines to atomic-non-integer.ll. NFC

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun Apr 7 18:54:24 PDT 2019


Author: ctopper
Date: Sun Apr  7 18:54:24 2019
New Revision: 357881

URL: http://llvm.org/viewvc/llvm-project?rev=357881&view=rev
Log:
[X86] Add avx and avx512f command lines to atomic-non-integer.ll. NFC
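(The CHECK lines in the diff below are autogenerated, per the note in the test header, so the assertions for the new RUN lines come from rerunning the update script. A typical invocation from the llvm source root, assuming a freshly built llc is on PATH, looks something like:

    $ utils/update_llc_test_checks.py test/CodeGen/X86/atomic-non-integer.ll
)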

Modified:
    llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll

Modified: llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll?rev=357881&r1=357880&r2=357881&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-non-integer.ll Sun Apr  7 18:54:24 2019
@@ -1,7 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
+; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=avx | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX1
+; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs -mattr=avx512f | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX512
 ; RUN: llc < %s -mtriple=i386-linux-generic -verify-machineinstrs | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
-; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX  --check-prefix=X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=avx512f | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX  --check-prefix=X64-AVX512
 
 ; Note: This test is testing that the lowering for atomics matches what we
 ; currently emit for non-atomics + the atomic restriction.  The presence of
@@ -29,6 +33,33 @@ define void @store_half(half* %fptr, hal
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SSE-NEXT:    retl
 ;
+; X86-AVX1-LABEL: store_half:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    pushl %esi
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT:    subl $8, %esp
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 16
+; X86-AVX1-NEXT:    .cfi_offset %esi, -8
+; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX1-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX1-NEXT:    calll __gnu_f2h_ieee
+; X86-AVX1-NEXT:    movw %ax, (%esi)
+; X86-AVX1-NEXT:    addl $8, %esp
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX1-NEXT:    popl %esi
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: store_half:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X86-AVX512-NEXT:    vmovd %xmm0, %eax
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX512-NEXT:    movw %ax, (%ecx)
+; X86-AVX512-NEXT:    retl
+;
 ; X86-NOSSE-LABEL: store_half:
 ; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl %esi
@@ -47,17 +78,36 @@ define void @store_half(half* %fptr, hal
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NOSSE-NEXT:    retl
 ;
-; X64-LABEL: store_half:
-; X64:       # %bb.0:
-; X64-NEXT:    pushq %rbx
-; X64-NEXT:    .cfi_def_cfa_offset 16
-; X64-NEXT:    .cfi_offset %rbx, -16
-; X64-NEXT:    movq %rdi, %rbx
-; X64-NEXT:    callq __gnu_f2h_ieee
-; X64-NEXT:    movw %ax, (%rbx)
-; X64-NEXT:    popq %rbx
-; X64-NEXT:    .cfi_def_cfa_offset 8
-; X64-NEXT:    retq
+; X64-SSE-LABEL: store_half:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pushq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 16
+; X64-SSE-NEXT:    .cfi_offset %rbx, -16
+; X64-SSE-NEXT:    movq %rdi, %rbx
+; X64-SSE-NEXT:    callq __gnu_f2h_ieee
+; X64-SSE-NEXT:    movw %ax, (%rbx)
+; X64-SSE-NEXT:    popq %rbx
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: store_half:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    pushq %rbx
+; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
+; X64-AVX1-NEXT:    .cfi_offset %rbx, -16
+; X64-AVX1-NEXT:    movq %rdi, %rbx
+; X64-AVX1-NEXT:    callq __gnu_f2h_ieee
+; X64-AVX1-NEXT:    movw %ax, (%rbx)
+; X64-AVX1-NEXT:    popq %rbx
+; X64-AVX1-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: store_half:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
+; X64-AVX512-NEXT:    vmovd %xmm0, %eax
+; X64-AVX512-NEXT:    movw %ax, (%rdi)
+; X64-AVX512-NEXT:    retq
   store atomic half %v, half* %fptr unordered, align 2
   ret void
 }
@@ -70,11 +120,17 @@ define void @store_float(float* %fptr, f
 ; X86-NEXT:    movl %ecx, (%eax)
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: store_float:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %xmm0, %eax
-; X64-NEXT:    movl %eax, (%rdi)
-; X64-NEXT:    retq
+; X64-SSE-LABEL: store_float:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %xmm0, %eax
+; X64-SSE-NEXT:    movl %eax, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: store_float:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd %xmm0, %eax
+; X64-AVX-NEXT:    movl %eax, (%rdi)
+; X64-AVX-NEXT:    retq
   store atomic float %v, float* %fptr unordered, align 4
   ret void
 }
@@ -105,38 +161,82 @@ define void @store_double(double* %fptr,
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: store_double:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %xmm0, %rax
-; X64-NEXT:    movq %rax, (%rdi)
-; X64-NEXT:    retq
+; X64-SSE-LABEL: store_double:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %xmm0, %rax
+; X64-SSE-NEXT:    movq %rax, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: store_double:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq %xmm0, %rax
+; X64-AVX-NEXT:    movq %rax, (%rdi)
+; X64-AVX-NEXT:    retq
   store atomic double %v, double* %fptr unordered, align 8
   ret void
 }
 
 define void @store_fp128(fp128* %fptr, fp128 %v) {
-; X86-LABEL: store_fp128:
-; X86:       # %bb.0:
-; X86-NEXT:    subl $36, %esp
-; X86-NEXT:    .cfi_adjust_cfa_offset 36
-; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl %eax
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    calll __sync_lock_test_and_set_16
-; X86-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-NEXT:    addl $56, %esp
-; X86-NEXT:    .cfi_adjust_cfa_offset -56
-; X86-NEXT:    retl
+; X86-SSE-LABEL: store_fp128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    subl $36, %esp
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 36
+; X86-SSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl %eax
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    calll __sync_lock_test_and_set_16
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset -4
+; X86-SSE-NEXT:    addl $56, %esp
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset -56
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: store_fp128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    subl $44, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 48
+; X86-AVX-NEXT:    vmovaps {{[0-9]+}}(%esp), %xmm0
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT:    vmovups %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl %eax, (%esp)
+; X86-AVX-NEXT:    calll __sync_lock_test_and_set_16
+; X86-AVX-NEXT:    addl $40, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: store_fp128:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    subl $36, %esp
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 36
+; X86-NOSSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl %eax
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    calll __sync_lock_test_and_set_16
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -4
+; X86-NOSSE-NEXT:    addl $56, %esp
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -56
+; X86-NOSSE-NEXT:    retl
 ;
 ; X64-LABEL: store_fp128:
 ; X64:       # %bb.0:
@@ -151,27 +251,82 @@ define void @store_fp128(fp128* %fptr, f
 }
 
 define half @load_half(half* %fptr) {
-; X86-LABEL: load_half:
-; X86:       # %bb.0:
-; X86-NEXT:    subl $12, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 16
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movzwl (%eax), %eax
-; X86-NEXT:    movl %eax, (%esp)
-; X86-NEXT:    calll __gnu_h2f_ieee
-; X86-NEXT:    addl $12, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
+; X86-SSE-LABEL: load_half:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    subl $12, %esp
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 16
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movzwl (%eax), %eax
+; X86-SSE-NEXT:    movl %eax, (%esp)
+; X86-SSE-NEXT:    calll __gnu_h2f_ieee
+; X86-SSE-NEXT:    addl $12, %esp
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE-NEXT:    retl
 ;
-; X64-LABEL: load_half:
-; X64:       # %bb.0:
-; X64-NEXT:    pushq %rax
-; X64-NEXT:    .cfi_def_cfa_offset 16
-; X64-NEXT:    movzwl (%rdi), %edi
-; X64-NEXT:    callq __gnu_h2f_ieee
-; X64-NEXT:    popq %rax
-; X64-NEXT:    .cfi_def_cfa_offset 8
-; X64-NEXT:    retq
+; X86-AVX1-LABEL: load_half:
+; X86-AVX1:       # %bb.0:
+; X86-AVX1-NEXT:    subl $12, %esp
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 16
+; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX1-NEXT:    movzwl (%eax), %eax
+; X86-AVX1-NEXT:    movl %eax, (%esp)
+; X86-AVX1-NEXT:    calll __gnu_h2f_ieee
+; X86-AVX1-NEXT:    addl $12, %esp
+; X86-AVX1-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX1-NEXT:    retl
+;
+; X86-AVX512-LABEL: load_half:
+; X86-AVX512:       # %bb.0:
+; X86-AVX512-NEXT:    pushl %eax
+; X86-AVX512-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX512-NEXT:    movswl (%eax), %eax
+; X86-AVX512-NEXT:    vmovd %eax, %xmm0
+; X86-AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; X86-AVX512-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX512-NEXT:    flds (%esp)
+; X86-AVX512-NEXT:    popl %eax
+; X86-AVX512-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX512-NEXT:    retl
+;
+; X86-NOSSE-LABEL: load_half:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    subl $12, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 16
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    movzwl (%eax), %eax
+; X86-NOSSE-NEXT:    movl %eax, (%esp)
+; X86-NOSSE-NEXT:    calll __gnu_h2f_ieee
+; X86-NOSSE-NEXT:    addl $12, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT:    retl
+;
+; X64-SSE-LABEL: load_half:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    pushq %rax
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 16
+; X64-SSE-NEXT:    movzwl (%rdi), %edi
+; X64-SSE-NEXT:    callq __gnu_h2f_ieee
+; X64-SSE-NEXT:    popq %rax
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX1-LABEL: load_half:
+; X64-AVX1:       # %bb.0:
+; X64-AVX1-NEXT:    pushq %rax
+; X64-AVX1-NEXT:    .cfi_def_cfa_offset 16
+; X64-AVX1-NEXT:    movzwl (%rdi), %edi
+; X64-AVX1-NEXT:    callq __gnu_h2f_ieee
+; X64-AVX1-NEXT:    popq %rax
+; X64-AVX1-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX1-NEXT:    retq
+;
+; X64-AVX512-LABEL: load_half:
+; X64-AVX512:       # %bb.0:
+; X64-AVX512-NEXT:    movswl (%rdi), %eax
+; X64-AVX512-NEXT:    vmovd %eax, %xmm0
+; X64-AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; X64-AVX512-NEXT:    retq
   %v = load atomic half, half* %fptr unordered, align 2
   ret half %v
 }
@@ -189,6 +344,18 @@ define float @load_float(float* %fptr) {
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SSE-NEXT:    retl
 ;
+; X86-AVX-LABEL: load_float:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovd (%eax), %xmm0
+; X86-AVX-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX-NEXT:    flds (%esp)
+; X86-AVX-NEXT:    popl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
 ; X86-NOSSE-LABEL: load_float:
 ; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl %eax
@@ -201,10 +368,15 @@ define float @load_float(float* %fptr) {
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NOSSE-NEXT:    retl
 ;
-; X64-LABEL: load_float:
-; X64:       # %bb.0:
-; X64-NEXT:    movd (%rdi), %xmm0
-; X64-NEXT:    retq
+; X64-SSE-LABEL: load_float:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load_float:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %v = load atomic float, float* %fptr unordered, align 4
   ret float %v
 }
@@ -222,6 +394,18 @@ define double @load_double(double* %fptr
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SSE-NEXT:    retl
 ;
+; X86-AVX-LABEL: load_double:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    subl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 16
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovlps %xmm0, (%esp)
+; X86-AVX-NEXT:    fldl (%esp)
+; X86-AVX-NEXT:    addl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
 ; X86-NOSSE-LABEL: load_double:
 ; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl %ebx
@@ -249,69 +433,155 @@ define double @load_double(double* %fptr
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NOSSE-NEXT:    retl
 ;
-; X64-LABEL: load_double:
-; X64:       # %bb.0:
-; X64-NEXT:    movq (%rdi), %xmm0
-; X64-NEXT:    retq
+; X64-SSE-LABEL: load_double:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq (%rdi), %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load_double:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq (%rdi), %xmm0
+; X64-AVX-NEXT:    retq
   %v = load atomic double, double* %fptr unordered, align 8
   ret double %v
 }
 
 define fp128 @load_fp128(fp128* %fptr) {
-; X86-LABEL: load_fp128:
-; X86:       # %bb.0:
-; X86-NEXT:    pushl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    pushl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    subl $20, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 32
-; X86-NEXT:    .cfi_offset %esi, -12
-; X86-NEXT:    .cfi_offset %edi, -8
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    subl $8, %esp
-; X86-NEXT:    .cfi_adjust_cfa_offset 8
-; X86-NEXT:    leal {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl $0
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl {{[0-9]+}}(%esp)
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    pushl %eax
-; X86-NEXT:    .cfi_adjust_cfa_offset 4
-; X86-NEXT:    calll __sync_val_compare_and_swap_16
-; X86-NEXT:    .cfi_adjust_cfa_offset -4
-; X86-NEXT:    addl $44, %esp
-; X86-NEXT:    .cfi_adjust_cfa_offset -44
-; X86-NEXT:    movl (%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl %edi, 8(%esi)
-; X86-NEXT:    movl %edx, 12(%esi)
-; X86-NEXT:    movl %eax, (%esi)
-; X86-NEXT:    movl %ecx, 4(%esi)
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    addl $20, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    popl %edi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl $4
+; X86-SSE-LABEL: load_fp128:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    pushl %edi
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE-NEXT:    pushl %esi
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE-NEXT:    subl $20, %esp
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 32
+; X86-SSE-NEXT:    .cfi_offset %esi, -12
+; X86-SSE-NEXT:    .cfi_offset %edi, -8
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-SSE-NEXT:    subl $8, %esp
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 8
+; X86-SSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl $0
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    pushl %eax
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-SSE-NEXT:    calll __sync_val_compare_and_swap_16
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset -4
+; X86-SSE-NEXT:    addl $44, %esp
+; X86-SSE-NEXT:    .cfi_adjust_cfa_offset -44
+; X86-SSE-NEXT:    movl (%esp), %eax
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-SSE-NEXT:    movl %edi, 8(%esi)
+; X86-SSE-NEXT:    movl %edx, 12(%esi)
+; X86-SSE-NEXT:    movl %eax, (%esi)
+; X86-SSE-NEXT:    movl %ecx, 4(%esi)
+; X86-SSE-NEXT:    movl %esi, %eax
+; X86-SSE-NEXT:    addl $20, %esp
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 12
+; X86-SSE-NEXT:    popl %esi
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE-NEXT:    popl %edi
+; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE-NEXT:    retl $4
+;
+; X86-AVX-LABEL: load_fp128:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %esi
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    subl $56, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 64
+; X86-AVX-NEXT:    .cfi_offset %esi, -8
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT:    vmovups %ymm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl %eax, (%esp)
+; X86-AVX-NEXT:    vzeroupper
+; X86-AVX-NEXT:    calll __sync_val_compare_and_swap_16
+; X86-AVX-NEXT:    subl $4, %esp
+; X86-AVX-NEXT:    vmovups {{[0-9]+}}(%esp), %xmm0
+; X86-AVX-NEXT:    vmovaps %xmm0, (%esi)
+; X86-AVX-NEXT:    movl %esi, %eax
+; X86-AVX-NEXT:    addl $56, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    popl %esi
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl $4
+;
+; X86-NOSSE-LABEL: load_fp128:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl %edi
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
+; X86-NOSSE-NEXT:    pushl %esi
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
+; X86-NOSSE-NEXT:    subl $20, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 32
+; X86-NOSSE-NEXT:    .cfi_offset %esi, -12
+; X86-NOSSE-NEXT:    .cfi_offset %edi, -8
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT:    subl $8, %esp
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 8
+; X86-NOSSE-NEXT:    leal {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl $0
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    pushl %eax
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset 4
+; X86-NOSSE-NEXT:    calll __sync_val_compare_and_swap_16
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -4
+; X86-NOSSE-NEXT:    addl $44, %esp
+; X86-NOSSE-NEXT:    .cfi_adjust_cfa_offset -44
+; X86-NOSSE-NEXT:    movl (%esp), %eax
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NOSSE-NEXT:    movl %edi, 8(%esi)
+; X86-NOSSE-NEXT:    movl %edx, 12(%esi)
+; X86-NOSSE-NEXT:    movl %eax, (%esi)
+; X86-NOSSE-NEXT:    movl %ecx, 4(%esi)
+; X86-NOSSE-NEXT:    movl %esi, %eax
+; X86-NOSSE-NEXT:    addl $20, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
+; X86-NOSSE-NEXT:    popl %esi
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
+; X86-NOSSE-NEXT:    popl %edi
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT:    retl $4
 ;
 ; X64-LABEL: load_fp128:
 ; X64:       # %bb.0:
@@ -341,11 +611,17 @@ define void @store_float_seq_cst(float*
 ; X86-NEXT:    xchgl %ecx, (%eax)
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: store_float_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movd %xmm0, %eax
-; X64-NEXT:    xchgl %eax, (%rdi)
-; X64-NEXT:    retq
+; X64-SSE-LABEL: store_float_seq_cst:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movd %xmm0, %eax
+; X64-SSE-NEXT:    xchgl %eax, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: store_float_seq_cst:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovd %xmm0, %eax
+; X64-AVX-NEXT:    xchgl %eax, (%rdi)
+; X64-AVX-NEXT:    retq
   store atomic float %v, float* %fptr seq_cst, align 4
   ret void
 }
@@ -376,11 +652,17 @@ define void @store_double_seq_cst(double
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ;
-; X64-LABEL: store_double_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movq %xmm0, %rax
-; X64-NEXT:    xchgq %rax, (%rdi)
-; X64-NEXT:    retq
+; X64-SSE-LABEL: store_double_seq_cst:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq %xmm0, %rax
+; X64-SSE-NEXT:    xchgq %rax, (%rdi)
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: store_double_seq_cst:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    vmovq %xmm0, %rax
+; X64-AVX-NEXT:    xchgq %rax, (%rdi)
+; X64-AVX-NEXT:    retq
   store atomic double %v, double* %fptr seq_cst, align 8
   ret void
 }
@@ -399,6 +681,19 @@ define float @load_float_seq_cst(float*
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SSE-NEXT:    retl
 ;
+; X86-AVX-LABEL: load_float_seq_cst:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    movl (%eax), %eax
+; X86-AVX-NEXT:    vmovd %eax, %xmm0
+; X86-AVX-NEXT:    vmovd %xmm0, (%esp)
+; X86-AVX-NEXT:    flds (%esp)
+; X86-AVX-NEXT:    popl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
 ; X86-NOSSE-LABEL: load_float_seq_cst:
 ; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl %eax
@@ -411,11 +706,17 @@ define float @load_float_seq_cst(float*
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NOSSE-NEXT:    retl
 ;
-; X64-LABEL: load_float_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    movd %eax, %xmm0
-; X64-NEXT:    retq
+; X64-SSE-LABEL: load_float_seq_cst:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movl (%rdi), %eax
+; X64-SSE-NEXT:    movd %eax, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load_float_seq_cst:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movl (%rdi), %eax
+; X64-AVX-NEXT:    vmovd %eax, %xmm0
+; X64-AVX-NEXT:    retq
   %v = load atomic float, float* %fptr seq_cst, align 4
   ret float %v
 }
@@ -433,6 +734,18 @@ define double @load_double_seq_cst(doubl
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SSE-NEXT:    retl
 ;
+; X86-AVX-LABEL: load_double_seq_cst:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    subl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 16
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovlps %xmm0, (%esp)
+; X86-AVX-NEXT:    fldl (%esp)
+; X86-AVX-NEXT:    addl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
 ; X86-NOSSE-LABEL: load_double_seq_cst:
 ; X86-NOSSE:       # %bb.0:
 ; X86-NOSSE-NEXT:    pushl %ebx
@@ -460,11 +773,17 @@ define double @load_double_seq_cst(doubl
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NOSSE-NEXT:    retl
 ;
-; X64-LABEL: load_double_seq_cst:
-; X64:       # %bb.0:
-; X64-NEXT:    movq (%rdi), %rax
-; X64-NEXT:    movq %rax, %xmm0
-; X64-NEXT:    retq
+; X64-SSE-LABEL: load_double_seq_cst:
+; X64-SSE:       # %bb.0:
+; X64-SSE-NEXT:    movq (%rdi), %rax
+; X64-SSE-NEXT:    movq %rax, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: load_double_seq_cst:
+; X64-AVX:       # %bb.0:
+; X64-AVX-NEXT:    movq (%rdi), %rax
+; X64-AVX-NEXT:    vmovq %rax, %xmm0
+; X64-AVX-NEXT:    retq
   %v = load atomic double, double* %fptr seq_cst, align 8
   ret double %v
 }
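
For reference, lit expands %s in a RUN line to the path of the test file itself, so the new x86-64 AVX configuration can be reproduced by hand with roughly the following (paths assumed for a local checkout, not part of the commit):

    $ llc < test/CodeGen/X86/atomic-non-integer.ll \
        -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=avx \
      | FileCheck test/CodeGen/X86/atomic-non-integer.ll \
        --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1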
