[llvm] 2460989 - [SelectionDAG] Enable lowering unordered atomic loads w/LoadSDNode (and stores w/StoreSDNode) by default

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 29 12:46:32 PDT 2019


Author: Philip Reames
Date: 2019-10-29T12:46:24-07:00
New Revision: 2460989eabb8adca4b7973aa23997b5ec832219b

URL: https://github.com/llvm/llvm-project/commit/2460989eabb8adca4b7973aa23997b5ec832219b
DIFF: https://github.com/llvm/llvm-project/commit/2460989eabb8adca4b7973aa23997b5ec832219b.diff

LOG: [SelectionDAG] Enable lowering unordered atomic loads w/LoadSDNode (and stores w/StoreSDNode) by default

Enable the new SelectionDAG representation for unordered loads and stores introduced in r371441 by default.  As a reminder, the new lowering changes the representation of an unordered atomic load from an AtomicSDNode - which is essentially a black box which gets passed through without combines messing with it - to a LoadSDNode w/ an atomic marker on the MMO. The latter parallels the way we handle volatiles, and I've audited the code to ensure that every location which checks one checks the other.

This has been fairly heavily fuzzed, and I examined diffs in a reasonably large corpus of assembly by hand, so I'm reasonably sure this is correct for the common case.  Late in the review for this, it was discovered that I hadn't correctly handled cases which could be legalized into CAS operations.  This points out that there's a strong bias in the IR of the frontend I'm working with towards only legal atomics.  If there are problems with this patch, the most likely area will be legalization.

Differential Revision: https://reviews.llvm.org/D69219

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
    llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
    llvm/lib/Target/X86/X86ISelLowering.cpp
    llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
    llvm/test/CodeGen/X86/atomic-non-integer.ll
    llvm/test/CodeGen/X86/atomic-unordered.ll
    llvm/test/CodeGen/X86/combineIncDecVector-crash.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 0e193ba383b5..28031e757dac 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2642,6 +2642,21 @@ void DAGTypeLegalizer::ExpandIntRes_LLRINT(SDNode *N, SDValue &Lo,
 
 void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
                                          SDValue &Lo, SDValue &Hi) {
+  if (N->isAtomic()) {
+    // It's typical to have larger CAS than atomic load instructions.
+    SDLoc dl(N);
+    EVT VT = N->getMemoryVT();
+    SDVTList VTs = DAG.getVTList(VT, MVT::i1, MVT::Other);
+    SDValue Zero = DAG.getConstant(0, dl, VT);
+    SDValue Swap = DAG.getAtomicCmpSwap(
+        ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl,
+        VT, VTs, N->getOperand(0),
+        N->getOperand(1), Zero, Zero, N->getMemOperand());
+    ReplaceValueWith(SDValue(N, 0), Swap.getValue(0));
+    ReplaceValueWith(SDValue(N, 1), Swap.getValue(2));
+    return;
+  }
+  
   if (ISD::isNormalLoad(N)) {
     ExpandRes_NormalLoad(N, Lo, Hi);
     return;
@@ -3888,6 +3903,16 @@ SDValue DAGTypeLegalizer::ExpandIntOp_SINT_TO_FP(SDNode *N) {
 }
 
 SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
+  if (N->isAtomic()) {
+    // It's typical to have larger CAS than atomic store instructions.
+    SDLoc dl(N);
+    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
+                                 N->getMemoryVT(),
+                                 N->getOperand(0), N->getOperand(2),
+                                 N->getOperand(1),
+                                 N->getMemOperand());
+    return Swap.getValue(1);
+  }
   if (ISD::isNormalStore(N))
     return ExpandOp_NormalStore(N, OpNo);
 

diff  --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
index 5562f400b6e1..0c0d6ade8b42 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp
@@ -248,6 +248,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
   SDLoc dl(N);
 
   LoadSDNode *LD = cast<LoadSDNode>(N);
+  assert(!LD->isAtomic() && "Atomics can not be split");
   EVT ValueVT = LD->getValueType(0);
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
   SDValue Chain = LD->getChain();
@@ -459,6 +460,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
   SDLoc dl(N);
 
   StoreSDNode *St = cast<StoreSDNode>(N);
+  assert(!St->isAtomic() && "Atomics can not be split");
   EVT ValueVT = St->getValue().getValueType();
   EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), ValueVT);
   SDValue Chain = St->getChain();

diff  --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2feba027b68f..4728d99dd615 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -87,7 +87,7 @@ static cl::opt<bool> MulConstantOptimization(
     cl::Hidden);
 
 static cl::opt<bool> ExperimentalUnorderedISEL(
-    "x86-experimental-unordered-atomic-isel", cl::init(false),
+    "x86-experimental-unordered-atomic-isel", cl::init(true),
     cl::desc("Use LoadSDNode and StoreSDNode instead of "
              "AtomicSDNode for unordered atomic loads and "
              "stores respectively."),

diff  --git a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
index a3028e9ac7bb..1415cd22c2c5 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
@@ -21,14 +21,7 @@ define void @store_fp128(fp128* %fptr, fp128 %v) {
 ;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
-; X64-SSE-NEXT:    movq (%rsp), %rsi
-; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
 ; X64-SSE-NEXT:    retq
   store atomic fp128 %v, fp128* %fptr unordered, align 16
   ret void

diff  --git a/llvm/test/CodeGen/X86/atomic-non-integer.ll b/llvm/test/CodeGen/X86/atomic-non-integer.ll
index 8fd96b749a39..812e703690cb 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer.ll
@@ -114,12 +114,26 @@ define void @store_half(half* %fptr, half %v) {
 }
 
 define void @store_float(float* %fptr, float %v) {
-; X86-LABEL: store_float:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl %ecx, (%eax)
-; X86-NEXT:    retl
+; X86-SSE-LABEL: store_float:
+; X86-SSE:       # %bb.0:
+; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT:    movss %xmm0, (%eax)
+; X86-SSE-NEXT:    retl
+;
+; X86-AVX-LABEL: store_float:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovss %xmm0, (%eax)
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: store_float:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    flds {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    fstps (%eax)
+; X86-NOSSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: store_float:
 ; X64-SSE:       # %bb.0:
@@ -162,16 +176,16 @@ define void @store_double(double* %fptr, double %v) {
 ;
 ; X86-SSE2-LABEL: store_double:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT:    movlps %xmm0, (%eax)
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movsd %xmm0, (%eax)
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: store_double:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovlps %xmm0, (%eax)
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd %xmm0, (%eax)
 ; X86-AVX-NEXT:    retl
 ;
 ; X86-NOSSE-LABEL: store_double:
@@ -276,26 +290,12 @@ define void @store_fp128(fp128* %fptr, fp128 %v) {
 ;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
-; X64-SSE-NEXT:    movq (%rsp), %rsi
-; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: store_fp128:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    subq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
-; X64-AVX-NEXT:    vmovaps %xmm0, (%rsp)
-; X64-AVX-NEXT:    movq (%rsp), %rsi
-; X64-AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
-; X64-AVX-NEXT:    callq __sync_lock_test_and_set_16
-; X64-AVX-NEXT:    addq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
 ; X64-AVX-NEXT:    retq
   store atomic fp128 %v, fp128* %fptr unordered, align 16
   ret void
@@ -383,53 +383,11 @@ define half @load_half(half* %fptr) {
 }
 
 define float @load_float(float* %fptr) {
-; X86-SSE1-LABEL: load_float:
-; X86-SSE1:       # %bb.0:
-; X86-SSE1-NEXT:    pushl %eax
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    movl (%eax), %eax
-; X86-SSE1-NEXT:    movl %eax, (%esp)
-; X86-SSE1-NEXT:    flds (%esp)
-; X86-SSE1-NEXT:    popl %eax
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE1-NEXT:    retl
-;
-; X86-SSE2-LABEL: load_float:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pushl %eax
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE2-NEXT:    movss %xmm0, (%esp)
-; X86-SSE2-NEXT:    flds (%esp)
-; X86-SSE2-NEXT:    popl %eax
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX-LABEL: load_float:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    pushl %eax
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX-NEXT:    vmovss %xmm0, (%esp)
-; X86-AVX-NEXT:    flds (%esp)
-; X86-AVX-NEXT:    popl %eax
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
-; X86-AVX-NEXT:    retl
-;
-; X86-NOSSE-LABEL: load_float:
-; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    pushl %eax
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl (%eax), %eax
-; X86-NOSSE-NEXT:    movl %eax, (%esp)
-; X86-NOSSE-NEXT:    flds (%esp)
-; X86-NOSSE-NEXT:    popl %eax
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
-; X86-NOSSE-NEXT:    retl
+; X86-LABEL: load_float:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    flds (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-SSE-LABEL: load_float:
 ; X64-SSE:       # %bb.0:
@@ -445,61 +403,11 @@ define float @load_float(float* %fptr) {
 }
 
 define double @load_double(double* %fptr) {
-; X86-SSE1-LABEL: load_double:
-; X86-SSE1:       # %bb.0:
-; X86-SSE1-NEXT:    subl $20, %esp
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 24
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    fildll (%eax)
-; X86-SSE1-NEXT:    fistpll {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
-; X86-SSE1-NEXT:    movl %eax, (%esp)
-; X86-SSE1-NEXT:    fldl (%esp)
-; X86-SSE1-NEXT:    addl $20, %esp
-; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE1-NEXT:    retl
-;
-; X86-SSE2-LABEL: load_double:
-; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    subl $12, %esp
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 16
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT:    movlps %xmm0, (%esp)
-; X86-SSE2-NEXT:    fldl (%esp)
-; X86-SSE2-NEXT:    addl $12, %esp
-; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
-; X86-SSE2-NEXT:    retl
-;
-; X86-AVX-LABEL: load_double:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    subl $12, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 16
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT:    vmovlps %xmm0, (%esp)
-; X86-AVX-NEXT:    fldl (%esp)
-; X86-AVX-NEXT:    addl $12, %esp
-; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
-; X86-AVX-NEXT:    retl
-;
-; X86-NOSSE-LABEL: load_double:
-; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    subl $20, %esp
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 24
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    fildll (%eax)
-; X86-NOSSE-NEXT:    fistpll {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    movl %eax, (%esp)
-; X86-NOSSE-NEXT:    fldl (%esp)
-; X86-NOSSE-NEXT:    addl $20, %esp
-; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
-; X86-NOSSE-NEXT:    retl
+; X86-LABEL: load_double:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    fldl (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-SSE-LABEL: load_double:
 ; X64-SSE:       # %bb.0:
@@ -557,10 +465,10 @@ define fp128 @load_fp128(fp128* %fptr) {
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT:    movl %edi, 8(%esi)
-; X86-SSE-NEXT:    movl %edx, 12(%esi)
-; X86-SSE-NEXT:    movl %eax, (%esi)
+; X86-SSE-NEXT:    movl %edi, 12(%esi)
+; X86-SSE-NEXT:    movl %edx, 8(%esi)
 ; X86-SSE-NEXT:    movl %ecx, 4(%esi)
+; X86-SSE-NEXT:    movl %eax, (%esi)
 ; X86-SSE-NEXT:    movl %esi, %eax
 ; X86-SSE-NEXT:    addl $20, %esp
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 12
@@ -638,10 +546,10 @@ define fp128 @load_fp128(fp128* %fptr) {
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NOSSE-NEXT:    movl %edi, 8(%esi)
-; X86-NOSSE-NEXT:    movl %edx, 12(%esi)
-; X86-NOSSE-NEXT:    movl %eax, (%esi)
+; X86-NOSSE-NEXT:    movl %edi, 12(%esi)
+; X86-NOSSE-NEXT:    movl %edx, 8(%esi)
 ; X86-NOSSE-NEXT:    movl %ecx, 4(%esi)
+; X86-NOSSE-NEXT:    movl %eax, (%esi)
 ; X86-NOSSE-NEXT:    movl %esi, %eax
 ; X86-NOSSE-NEXT:    addl $20, %esp
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
@@ -653,34 +561,12 @@ define fp128 @load_fp128(fp128* %fptr) {
 ;
 ; X64-SSE-LABEL: load_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    subq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
-; X64-SSE-NEXT:    xorl %esi, %esi
-; X64-SSE-NEXT:    xorl %edx, %edx
-; X64-SSE-NEXT:    xorl %ecx, %ecx
-; X64-SSE-NEXT:    xorl %r8d, %r8d
-; X64-SSE-NEXT:    callq __sync_val_compare_and_swap_16
-; X64-SSE-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
-; X64-SSE-NEXT:    movq %rax, (%rsp)
-; X64-SSE-NEXT:    movaps (%rsp), %xmm0
-; X64-SSE-NEXT:    addq $24, %rsp
-; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
+; X64-SSE-NEXT:    movaps (%rdi), %xmm0
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: load_fp128:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    subq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
-; X64-AVX-NEXT:    xorl %esi, %esi
-; X64-AVX-NEXT:    xorl %edx, %edx
-; X64-AVX-NEXT:    xorl %ecx, %ecx
-; X64-AVX-NEXT:    xorl %r8d, %r8d
-; X64-AVX-NEXT:    callq __sync_val_compare_and_swap_16
-; X64-AVX-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
-; X64-AVX-NEXT:    movq %rax, (%rsp)
-; X64-AVX-NEXT:    vmovaps (%rsp), %xmm0
-; X64-AVX-NEXT:    addq $24, %rsp
-; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0
 ; X64-AVX-NEXT:    retq
   %v = load atomic fp128, fp128* %fptr unordered, align 16
   ret fp128 %v

diff  --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index 35055a5adca8..9843bf81e905 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-CUR %s
-; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-CUR %s
-; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-EX %s
-; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-EX %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-CUR %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake  -x86-experimental-unordered-atomic-isel=0 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-CUR %s
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O0,CHECK-O0-EX %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mcpu=skylake -x86-experimental-unordered-atomic-isel=1 | FileCheck --check-prefixes=CHECK,CHECK-O3,CHECK-O3-EX %s
 
 define i8 @load_i8(i8* %ptr) {
 ; CHECK-LABEL: load_i8:

diff  --git a/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll b/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
index 8dea7a5fdcdc..62f3da44bda7 100644
--- a/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
+++ b/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
@@ -19,11 +19,11 @@ define void @TestvMeth(i32 %0, i64 %1) gc "statepoint-example" !prof !1 {
 ; CHECK-NEXT:    callq newarray
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    addss (%rax), %xmm0
 ; CHECK-NEXT:    movdqu (%rax), %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm2, %xmm2
 ; CHECK-NEXT:    psubd %xmm2, %xmm1
 ; CHECK-NEXT:    movdqu %xmm1, (%rax)
+; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movss %xmm0, (%rax)
 bci_0:
    %token418 = call token (i64, i32, i8 * (i64, i32, i32, i32)*, i32,


        


More information about the llvm-commits mailing list