[llvm] r356360 - [AArch64] Fix bug 35094 atomicrmw on Armv8.1-A+lse

Christof Douma via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 18 02:21:06 PDT 2019


Author: christof
Date: Mon Mar 18 02:21:06 2019
New Revision: 356360

URL: http://llvm.org/viewvc/llvm-project?rev=356360&view=rev
Log:
[AArch64] Fix bug 35094 atomicrmw on Armv8.1-A+lse

Fixes https://bugs.llvm.org/show_bug.cgi?id=35094

The Dead register definition pass should leave the atomicrmw instructions
on AArch64 (LSE extension) alone. The reason is the following statement in
the Arm ARM:

"The ST<OP> instructions, and LD<OP> instructions where the destination
register is WZR or XZR, are not regarded as doing a read for the purpose
of a DMB LD barrier."

A good example was given by Will Deacon in the GCC thread (linked from
Bugzilla ticket 35094):

    P0 (atomic_int* y,atomic_int* x) {
      atomic_store_explicit(x,1,memory_order_relaxed);
      atomic_thread_fence(memory_order_release);
      atomic_store_explicit(y,1,memory_order_relaxed);
    }

    P1 (atomic_int* y,atomic_int* x) {
      atomic_fetch_add_explicit(y,1,memory_order_relaxed);  // STADD
      atomic_thread_fence(memory_order_acquire);
      int r0 = atomic_load_explicit(x,memory_order_relaxed);
    }

    P2 (atomic_int* y) {
      int r1 = atomic_load_explicit(y,memory_order_relaxed);
    }

    My understanding is that it is forbidden for r0 == 0 and r1 == 2 after
    this test has executed. However, if the relaxed add in P1 compiles to
    STADD and the subsequent acquire fence is compiled as DMB LD, then we
    don't have any ordering guarantees in P1 and the forbidden result could
    be observed.
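
To make the pattern concrete, the P1 body boils down to a relaxed atomicrmw
followed by an acquire fence. The IR below is only an illustrative sketch
(the function and value names are not taken from the commit or the bug
report); when compiled with -mattr=+lse, the unused atomicrmw result must
stay in a live register so that the subsequent "dmb ishld" still orders the
read:

    define i32 @p1(i32* %y, i32* %x) {
      %old = atomicrmw add i32* %y, i32 1 monotonic      ; ldadd, result unused
      fence acquire                                      ; dmb ishld
      %r0 = load atomic i32, i32* %x monotonic, align 4
      ret i32 %r0
    }

Before this patch, the dead register definitions pass rewrote that unused
result register to wzr/xzr, so the add was printed via the ST<OP> alias
(stadd), which the DMB LD barrier does not order per the Arm ARM quote
above. The test updates below therefore expect the LD<OP> forms with a live
destination register instead of the ST<OP> aliases.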

Change-Id: I419f9f9df947716932038e1100c18d10a96408d0

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
    llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp?rev=356360&r1=356359&r2=356360&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64DeadRegisterDefinitionsPass.cpp Mon Mar 18 02:21:06 2019
@@ -68,6 +68,51 @@ static bool usesFrameIndex(const Machine
   return false;
 }
 
+// Instructions that lose their 'read' operation for a subsequent fence acquire
+// (DMB LD) once the zero register is used.
+//
+// WARNING: The acquire variants of the instructions are also affected, but they
+// are split out into `atomicBarrierDroppedOnZero()` to support annotations on
+// assembly.
+static bool atomicReadDroppedOnZero(unsigned Opcode) {
+  switch (Opcode) {
+    case AArch64::LDADDB:     case AArch64::LDADDH:
+    case AArch64::LDADDW:     case AArch64::LDADDX:
+    case AArch64::LDADDLB:    case AArch64::LDADDLH:
+    case AArch64::LDADDLW:    case AArch64::LDADDLX:
+    case AArch64::LDCLRB:     case AArch64::LDCLRH:
+    case AArch64::LDCLRW:     case AArch64::LDCLRX:
+    case AArch64::LDCLRLB:    case AArch64::LDCLRLH:
+    case AArch64::LDCLRLW:    case AArch64::LDCLRLX:
+    case AArch64::LDEORB:     case AArch64::LDEORH:
+    case AArch64::LDEORW:     case AArch64::LDEORX:
+    case AArch64::LDEORLB:    case AArch64::LDEORLH:
+    case AArch64::LDEORLW:    case AArch64::LDEORLX:
+    case AArch64::LDSETB:     case AArch64::LDSETH:
+    case AArch64::LDSETW:     case AArch64::LDSETX:
+    case AArch64::LDSETLB:    case AArch64::LDSETLH:
+    case AArch64::LDSETLW:    case AArch64::LDSETLX:
+    case AArch64::LDSMAXB:    case AArch64::LDSMAXH:
+    case AArch64::LDSMAXW:    case AArch64::LDSMAXX:
+    case AArch64::LDSMAXLB:   case AArch64::LDSMAXLH:
+    case AArch64::LDSMAXLW:   case AArch64::LDSMAXLX:
+    case AArch64::LDSMINB:    case AArch64::LDSMINH:
+    case AArch64::LDSMINW:    case AArch64::LDSMINX:
+    case AArch64::LDSMINLB:   case AArch64::LDSMINLH:
+    case AArch64::LDSMINLW:   case AArch64::LDSMINLX:
+    case AArch64::LDUMAXB:    case AArch64::LDUMAXH:
+    case AArch64::LDUMAXW:    case AArch64::LDUMAXX:
+    case AArch64::LDUMAXLB:   case AArch64::LDUMAXLH:
+    case AArch64::LDUMAXLW:   case AArch64::LDUMAXLX:
+    case AArch64::LDUMINB:    case AArch64::LDUMINH:
+    case AArch64::LDUMINW:    case AArch64::LDUMINX:
+    case AArch64::LDUMINLB:   case AArch64::LDUMINLH:
+    case AArch64::LDUMINLW:   case AArch64::LDUMINLX:
+    return true;
+  }
+  return false;
+}
+
 void AArch64DeadRegisterDefinitions::processMachineBasicBlock(
     MachineBasicBlock &MBB) {
   const MachineFunction &MF = *MBB.getParent();
@@ -88,7 +133,7 @@ void AArch64DeadRegisterDefinitions::pro
       continue;
     }
 
-    if (atomicBarrierDroppedOnZero(MI.getOpcode())) {
+    if (atomicBarrierDroppedOnZero(MI.getOpcode()) || atomicReadDroppedOnZero(MI.getOpcode())) {
       LLVM_DEBUG(dbgs() << "    Ignoring, semantics change with xzr/wzr.\n");
       continue;
     }

Modified: llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll?rev=356360&r1=356359&r2=356360&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/atomic-ops-lse.ll Mon Mar 18 02:21:06 2019
@@ -1311,7 +1311,7 @@ define void @test_atomic_load_add_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stadd w0, [x[[ADDR]]]
+; CHECK: ldadd w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1323,7 +1323,7 @@ define void @test_atomic_load_add_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stadd x0, [x[[ADDR]]]
+; CHECK: ldadd x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1387,7 +1387,7 @@ define void @test_atomic_load_add_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: staddl w0, [x[[ADDR]]]
+; CHECK: ldaddl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1399,7 +1399,7 @@ define void @test_atomic_load_add_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: staddl x0, [x[[ADDR]]]
+; CHECK: ldaddl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1696,7 +1696,7 @@ define void @test_atomic_load_and_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stclr w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldclr w{{[0-9]+}}, w[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1709,7 +1709,7 @@ define void @test_atomic_load_and_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stclr x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldclr x{{[0-9]+}}, x[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1774,7 +1774,7 @@ define void @test_atomic_load_and_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stclrl w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldclrl w{{[0-9]+}}, w[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -1787,7 +1787,7 @@ define void @test_atomic_load_and_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stclrl x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldclrl x{{[0-9]+}}, x[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2306,7 +2306,7 @@ define void @test_atomic_load_max_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stsmax w0, [x[[ADDR]]]
+; CHECK: ldsmax w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2318,7 +2318,7 @@ define void @test_atomic_load_max_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stsmax x0, [x[[ADDR]]]
+; CHECK: ldsmax x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2382,7 +2382,7 @@ define void @test_atomic_load_max_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stsmaxl w0, [x[[ADDR]]]
+; CHECK: ldsmaxl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2394,7 +2394,7 @@ define void @test_atomic_load_max_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stsmaxl x0, [x[[ADDR]]]
+; CHECK: ldsmaxl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2686,7 +2686,7 @@ define void @test_atomic_load_min_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stsmin w0, [x[[ADDR]]]
+; CHECK: ldsmin w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2698,7 +2698,7 @@ define void @test_atomic_load_min_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stsmin x0, [x[[ADDR]]]
+; CHECK: ldsmin x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2762,7 +2762,7 @@ define void @test_atomic_load_min_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stsminl w0, [x[[ADDR]]]
+; CHECK: ldsminl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -2774,7 +2774,7 @@ define void @test_atomic_load_min_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stsminl x0, [x[[ADDR]]]
+; CHECK: ldsminl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -3066,7 +3066,7 @@ define void @test_atomic_load_or_i32_nor
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stset w0, [x[[ADDR]]]
+; CHECK: ldset w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -3078,7 +3078,7 @@ define void @test_atomic_load_or_i64_nor
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stset x0, [x[[ADDR]]]
+; CHECK: ldset x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -3142,7 +3142,7 @@ define void @test_atomic_load_or_i32_nor
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stsetl w0, [x[[ADDR]]]
+; CHECK: ldsetl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -3154,7 +3154,7 @@ define void @test_atomic_load_or_i64_nor
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stsetl x0, [x[[ADDR]]]
+; CHECK: ldsetl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -3467,7 +3467,7 @@ define void @test_atomic_load_sub_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stadd w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldadd w{{[0-9]+}}, w[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
 
   ret void
@@ -3481,7 +3481,7 @@ define void @test_atomic_load_sub_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stadd x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldadd x{{[0-9]+}}, x[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
 
   ret void
@@ -3551,7 +3551,7 @@ define void @test_atomic_load_sub_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: staddl w[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaddl w{{[0-9]+}}, w[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
 
   ret void
@@ -3565,7 +3565,7 @@ define void @test_atomic_load_sub_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: staddl x[[NEW:[0-9]+]], [x[[ADDR]]]
+; CHECK: ldaddl x{{[0-9]+}}, x[[NEW:[1-9][0-9]*]], [x[[ADDR]]]
 ; CHECK-NOT: dmb
 
   ret void
@@ -4256,7 +4256,7 @@ define void @test_atomic_load_umax_i32_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stumax w0, [x[[ADDR]]]
+; CHECK: ldumax w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4268,7 +4268,7 @@ define void @test_atomic_load_umax_i64_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stumax x0, [x[[ADDR]]]
+; CHECK: ldumax x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4332,7 +4332,7 @@ define void @test_atomic_load_umax_i32_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stumaxl w0, [x[[ADDR]]]
+; CHECK: ldumaxl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4344,7 +4344,7 @@ define void @test_atomic_load_umax_i64_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stumaxl x0, [x[[ADDR]]]
+; CHECK: ldumaxl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4636,7 +4636,7 @@ define void @test_atomic_load_umin_i32_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stumin w0, [x[[ADDR]]]
+; CHECK: ldumin w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4648,7 +4648,7 @@ define void @test_atomic_load_umin_i64_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stumin x0, [x[[ADDR]]]
+; CHECK: ldumin x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4712,7 +4712,7 @@ define void @test_atomic_load_umin_i32_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: stuminl w0, [x[[ADDR]]]
+; CHECK: lduminl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -4724,7 +4724,7 @@ define void @test_atomic_load_umin_i64_n
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: stuminl x0, [x[[ADDR]]]
+; CHECK: lduminl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -5016,7 +5016,7 @@ define void @test_atomic_load_xor_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: steor w0, [x[[ADDR]]]
+; CHECK: ldeor w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -5028,7 +5028,7 @@ define void @test_atomic_load_xor_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: steor x0, [x[[ADDR]]]
+; CHECK: ldeor x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -5092,7 +5092,7 @@ define void @test_atomic_load_xor_i32_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var32
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var32
 
-; CHECK: steorl w0, [x[[ADDR]]]
+; CHECK: ldeorl w{{[0-9]+}}, w{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
@@ -5104,7 +5104,7 @@ define void @test_atomic_load_xor_i64_no
 ; CHECK: adrp [[TMPADDR:x[0-9]+]], var64
 ; CHECK: add x[[ADDR:[0-9]+]], [[TMPADDR]], {{#?}}:lo12:var64
 
-; CHECK: steorl x0, [x[[ADDR]]]
+; CHECK: ldeorl x{{[0-9]+}}, x{{[1-9][0-9]*}}, [x[[ADDR]]]
 ; CHECK-NOT: dmb
   ret void
 }
