[llvm] e086b24 - [M68k] Add support for atomic instructions

via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 9 02:38:42 PST 2022


Author: Sheng
Date: 2022-11-09T18:37:03+08:00
New Revision: e086b24d153016b7c001bf945afcbac0a5db9019

URL: https://github.com/llvm/llvm-project/commit/e086b24d153016b7c001bf945afcbac0a5db9019
DIFF: https://github.com/llvm/llvm-project/commit/e086b24d153016b7c001bf945afcbac0a5db9019.diff

LOG: [M68k] Add support for atomic instructions

This adds support for atomic_load, atomic_store, atomic_cmpxchg
and atomic_rmw

Fixes #48236

Reviewed by: myhsu, efriedma

Differential Revision: https://reviews.llvm.org/D136525

Added: 
    llvm/lib/Target/M68k/M68kInstrAtomics.td
    llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll
    llvm/test/CodeGen/M68k/Atomics/load-store.ll
    llvm/test/CodeGen/M68k/Atomics/rmw.ll
    llvm/test/MC/Disassembler/M68k/atomics.txt
    llvm/test/MC/M68k/Atomics/cas.s

Modified: 
    llvm/lib/Target/M68k/M68kISelLowering.cpp
    llvm/lib/Target/M68k/M68kISelLowering.h
    llvm/lib/Target/M68k/M68kInstrInfo.td
    llvm/lib/Target/M68k/M68kTargetMachine.cpp
    llvm/test/CodeGen/M68k/pipeline.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index 82b4e7ae1f364..87ce56a5b9a92 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -157,11 +157,42 @@ M68kTargetLowering::M68kTargetLowering(const M68kTargetMachine &TM,
 
   computeRegisterProperties(STI.getRegisterInfo());
 
+  // We lower the `atomic-compare-and-swap` to `__sync_val_compare_and_swap`
+  // for subtarget < M68020
+  setMaxAtomicSizeInBitsSupported(32);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, {MVT::i8, MVT::i16, MVT::i32},
+                     Subtarget.atLeastM68020() ? Legal : LibCall);
+
+  // M68k does not have native read-modify-write support, so expand all of them
+  // to `__sync_fetch_*` for target < M68020, otherwise expand to CmpxChg.
+  // See `shouldExpandAtomicRMWInIR` below.
+  setOperationAction(
+      {
+          ISD::ATOMIC_LOAD_ADD,
+          ISD::ATOMIC_LOAD_SUB,
+          ISD::ATOMIC_LOAD_AND,
+          ISD::ATOMIC_LOAD_OR,
+          ISD::ATOMIC_LOAD_XOR,
+          ISD::ATOMIC_LOAD_NAND,
+          ISD::ATOMIC_LOAD_MIN,
+          ISD::ATOMIC_LOAD_MAX,
+          ISD::ATOMIC_LOAD_UMIN,
+          ISD::ATOMIC_LOAD_UMAX,
+      },
+      {MVT::i8, MVT::i16, MVT::i32}, LibCall);
+
   // 2^2 bytes
   // FIXME can it be just 2^1?
   setMinFunctionAlignment(Align::Constant<2>());
 }
 
+TargetLoweringBase::AtomicExpansionKind
+M68kTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+  return Subtarget.atLeastM68020()
+             ? TargetLoweringBase::AtomicExpansionKind::CmpXChg
+             : TargetLoweringBase::AtomicExpansionKind::None;
+}
+
 EVT M68kTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context, EVT VT) const {
   // M68k SETcc producess either 0x00 or 0xFF

diff  --git a/llvm/lib/Target/M68k/M68kISelLowering.h b/llvm/lib/Target/M68k/M68kISelLowering.h
index f759a7d939c87..ec525bc4c6b3c 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.h
+++ b/llvm/lib/Target/M68k/M68kISelLowering.h
@@ -174,6 +174,9 @@ class M68kTargetLowering : public TargetLowering {
   CCAssignFn *getCCAssignFn(CallingConv::ID CC, bool Return,
                             bool IsVarArg) const;
 
+  AtomicExpansionKind
+  shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override;
+
 private:
   unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                        SelectionDAG &DAG) const;

diff  --git a/llvm/lib/Target/M68k/M68kInstrAtomics.td b/llvm/lib/Target/M68k/M68kInstrAtomics.td
new file mode 100644
index 0000000000000..6be53d469bbcd
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kInstrAtomics.td
@@ -0,0 +1,46 @@
+//===-- M68kInstrAtomics.td - Atomics Instructions ---------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+foreach size = [8, 16, 32] in {
+  def : Pat<(!cast<SDPatternOperator>("atomic_load_"#size) MxCP_ARI:$ptr),
+            (!cast<MxInst>("MOV"#size#"dj") !cast<MxMemOp>("MxARI"#size):$ptr)>;
+
+  def : Pat<(!cast<SDPatternOperator>("atomic_store_"#size) MxCP_ARI:$ptr,
+                                                            !cast<MxRegOp>("MxDRD"#size):$val),
+            (!cast<MxInst>("MOV"#size#"jd") !cast<MxMemOp>("MxARI"#size):$ptr,
+                                            !cast<MxRegOp>("MxDRD"#size):$val)>;
+}
+
+let Predicates = [AtLeastM68020] in {
+class MxCASOp<bits<2> size_encoding, MxType type>
+    : MxInst<(outs type.ROp:$out),
+             (ins type.ROp:$dc, type.ROp:$du, !cast<MxMemOp>("MxARI"#type.Size):$mem),
+             "cas."#type.Prefix#" $dc, $du, $mem"> {
+  let Inst = (ascend
+                (descend 0b00001, size_encoding, 0b011, MxEncAddrMode_j<"mem">.EA),
+                (descend 0b0000000, (operand "$du", 3), 0b000, (operand "$dc", 3))
+              );
+  let Constraints = "$out = $dc";
+  let mayLoad = 1;
+  let mayStore = 1;
+}
+
+def CAS8  : MxCASOp<0x1, MxType8d>;
+def CAS16 : MxCASOp<0x2, MxType16d>;
+def CAS32 : MxCASOp<0x3, MxType32d>;
+
+
+foreach size = [8, 16, 32] in {
+  def : Pat<(!cast<SDPatternOperator>("atomic_cmp_swap_"#size) MxCP_ARI:$ptr,
+                                                                !cast<MxRegOp>("MxDRD"#size):$cmp,
+                                                                !cast<MxRegOp>("MxDRD"#size):$new),
+            (!cast<MxInst>("CAS"#size) !cast<MxRegOp>("MxDRD"#size):$cmp,
+                                       !cast<MxRegOp>("MxDRD"#size):$new,
+                                       !cast<MxMemOp>("MxARI"#size):$ptr)>;
+}
+} // let Predicates = [AtLeastM68020]

diff  --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td
index f71d55c33feca..80c39e29ce111 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.td
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -781,5 +781,6 @@ include "M68kInstrShiftRotate.td"
 include "M68kInstrBits.td"
 include "M68kInstrArithmetic.td"
 include "M68kInstrControl.td"
+include "M68kInstrAtomics.td"
 
 include "M68kInstrCompiler.td"

diff  --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index f781e1ea55e87..cf0d296d69711 100644
--- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -143,6 +143,7 @@ class M68kPassConfig : public TargetPassConfig {
   const M68kSubtarget &getM68kSubtarget() const {
     return *getM68kTargetMachine().getSubtargetImpl();
   }
+  void addIRPasses() override;
   bool addIRTranslator() override;
   bool addLegalizeMachineIR() override;
   bool addRegBankSelect() override;
@@ -157,6 +158,11 @@ TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
   return new M68kPassConfig(*this, PM);
 }
 
+void M68kPassConfig::addIRPasses() {
+  addPass(createAtomicExpandPass());
+  TargetPassConfig::addIRPasses();
+}
+
 bool M68kPassConfig::addInstSelector() {
   // Install an instruction selector.
   addPass(createM68kISelDag(getM68kTargetMachine()));

diff  --git a/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll b/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll
new file mode 100644
index 0000000000000..b018ea4af4aa7
--- /dev/null
+++ b/llvm/test/CodeGen/M68k/Atomics/cmpxchg.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC
+
+define i1 @cmpxchg_i8_monotonic_monotonic(i8 %cmp, i8 %new, ptr %mem) nounwind {
+; NO-ATOMIC-LABEL: cmpxchg_i8_monotonic_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #20, %sp
+; NO-ATOMIC-NEXT:    movem.l %d2, (16,%sp) ; 8-byte Folded Spill
+; NO-ATOMIC-NEXT:    move.b (31,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #255, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (8,%sp)
+; NO-ATOMIC-NEXT:    move.b (27,%sp), %d2
+; NO-ATOMIC-NEXT:    move.l %d2, %d0
+; NO-ATOMIC-NEXT:    and.l #255, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (32,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_1@PLT
+; NO-ATOMIC-NEXT:    sub.b %d2, %d0
+; NO-ATOMIC-NEXT:    seq %d0
+; NO-ATOMIC-NEXT:    movem.l (16,%sp), %d2 ; 8-byte Folded Reload
+; NO-ATOMIC-NEXT:    adda.l #20, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: cmpxchg_i8_monotonic_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #4, %sp
+; ATOMIC-NEXT:    movem.l %d2, (0,%sp) ; 8-byte Folded Spill
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.b (15,%sp), %d0
+; ATOMIC-NEXT:    move.b (11,%sp), %d1
+; ATOMIC-NEXT:    move.b %d1, %d2
+; ATOMIC-NEXT:    cas.b %d2, %d0, (%a0)
+; ATOMIC-NEXT:    sub.b %d1, %d2
+; ATOMIC-NEXT:    seq %d0
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2 ; 8-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #4, %sp
+; ATOMIC-NEXT:    rts
+  %res = cmpxchg ptr %mem, i8 %cmp, i8 %new monotonic monotonic
+  %val = extractvalue {i8, i1} %res, 1
+  ret i1 %val
+}
+
+define i16 @cmpxchg_i16_release_monotonic(i16 %cmp, i16 %new, ptr %mem) nounwind {
+; NO-ATOMIC-LABEL: cmpxchg_i16_release_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    move.w (22,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #65535, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (8,%sp)
+; NO-ATOMIC-NEXT:    move.w (18,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #65535, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_2@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: cmpxchg_i16_release_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (12,%sp), %a0
+; ATOMIC-NEXT:    move.w (10,%sp), %d1
+; ATOMIC-NEXT:    move.w (6,%sp), %d0
+; ATOMIC-NEXT:    cas.w %d0, %d1, (%a0)
+; ATOMIC-NEXT:    rts
+  %res = cmpxchg ptr %mem, i16 %cmp, i16 %new release monotonic
+  %val = extractvalue {i16, i1} %res, 0
+  ret i16 %val
+}
+
+define i32 @cmpxchg_i32_release_acquire(i32 %cmp, i32 %new, ptr %mem) nounwind {
+; NO-ATOMIC-LABEL: cmpxchg_i32_release_acquire:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_val_compare_and_swap_4@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: cmpxchg_i32_release_acquire:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (12,%sp), %a0
+; ATOMIC-NEXT:    move.l (8,%sp), %d1
+; ATOMIC-NEXT:    move.l (4,%sp), %d0
+; ATOMIC-NEXT:    cas.l %d0, %d1, (%a0)
+; ATOMIC-NEXT:    rts
+  %res = cmpxchg ptr %mem, i32 %cmp, i32 %new release acquire
+  %val = extractvalue {i32, i1} %res, 0
+  ret i32 %val
+}
+
+define i64 @cmpxchg_i64_seqcst_seqcst(i64 %cmp, i64 %new, ptr %mem) nounwind {
+; NO-ATOMIC-LABEL: cmpxchg_i64_seqcst_seqcst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #36, %sp
+; NO-ATOMIC-NEXT:    move.l (44,%sp), (28,%sp)
+; NO-ATOMIC-NEXT:    move.l (40,%sp), (24,%sp)
+; NO-ATOMIC-NEXT:    lea (24,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l %a0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l #5, (20,%sp)
+; NO-ATOMIC-NEXT:    move.l #5, (16,%sp)
+; NO-ATOMIC-NEXT:    move.l (52,%sp), (12,%sp)
+; NO-ATOMIC-NEXT:    move.l (48,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (56,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_compare_exchange_8@PLT
+; NO-ATOMIC-NEXT:    move.l (28,%sp), %d1
+; NO-ATOMIC-NEXT:    move.l (24,%sp), %d0
+; NO-ATOMIC-NEXT:    adda.l #36, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: cmpxchg_i64_seqcst_seqcst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #36, %sp
+; ATOMIC-NEXT:    move.l (44,%sp), (28,%sp)
+; ATOMIC-NEXT:    move.l (40,%sp), (24,%sp)
+; ATOMIC-NEXT:    lea (24,%sp), %a0
+; ATOMIC-NEXT:    move.l %a0, (4,%sp)
+; ATOMIC-NEXT:    move.l #5, (20,%sp)
+; ATOMIC-NEXT:    move.l #5, (16,%sp)
+; ATOMIC-NEXT:    move.l (52,%sp), (12,%sp)
+; ATOMIC-NEXT:    move.l (48,%sp), (8,%sp)
+; ATOMIC-NEXT:    move.l (56,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_compare_exchange_8@PLT
+; ATOMIC-NEXT:    move.l (28,%sp), %d1
+; ATOMIC-NEXT:    move.l (24,%sp), %d0
+; ATOMIC-NEXT:    adda.l #36, %sp
+; ATOMIC-NEXT:    rts
+  %res = cmpxchg ptr %mem, i64 %cmp, i64 %new seq_cst seq_cst
+  %val = extractvalue {i64, i1} %res, 0
+  ret i64 %val
+}

diff  --git a/llvm/test/CodeGen/M68k/Atomics/load-store.ll b/llvm/test/CodeGen/M68k/Atomics/load-store.ll
new file mode 100644
index 0000000000000..7608edb220d3c
--- /dev/null
+++ b/llvm/test/CodeGen/M68k/Atomics/load-store.ll
@@ -0,0 +1,606 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC
+
+define i8 @atomic_load_i8_unordered(i8 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i8_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i8_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i8, i8* %a unordered, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_monotonic(i8 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i8_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i8_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i8, i8* %a monotonic, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_acquire(i8 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i8_acquire:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i8_acquire:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i8, i8* %a acquire, align 1
+  ret i8 %1
+}
+
+define i8 @atomic_load_i8_seq_cst(i8 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i8_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i8_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i8, i8* %a seq_cst, align 1
+  ret i8 %1
+}
+
+define i16 @atomic_load_i16_unordered(i16 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i16_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i16_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i16, i16* %a unordered, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_monotonic(i16 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i16_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i16_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i16, i16* %a monotonic, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_acquire(i16 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i16_acquire:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i16_acquire:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i16, i16* %a acquire, align 2
+  ret i16 %1
+}
+
+define i16 @atomic_load_i16_seq_cst(i16 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i16_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i16_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i16, i16* %a seq_cst, align 2
+  ret i16 %1
+}
+
+define i32 @atomic_load_i32_unordered(i32 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i32_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i32_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i32, i32* %a unordered, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_monotonic(i32 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i32_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i32_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i32, i32* %a monotonic, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_acquire(i32 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i32_acquire:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i32_acquire:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i32, i32* %a acquire, align 4
+  ret i32 %1
+}
+
+define i32 @atomic_load_i32_seq_cst(i32 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i32_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l (%a0), %d0
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i32_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l (%a0), %d0
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i32, i32* %a seq_cst, align 4
+  ret i32 %1
+}
+
+define i64 @atomic_load_i64_unordered(i64 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i64_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    move.l #0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i64_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #12, %sp
+; ATOMIC-NEXT:    move.l #0, (4,%sp)
+; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    adda.l #12, %sp
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i64, i64* %a unordered, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_monotonic(i64 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i64_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    move.l #0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i64_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #12, %sp
+; ATOMIC-NEXT:    move.l #0, (4,%sp)
+; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    adda.l #12, %sp
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i64, i64* %a monotonic, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_acquire(i64 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i64_acquire:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    move.l #2, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i64_acquire:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #12, %sp
+; ATOMIC-NEXT:    move.l #2, (4,%sp)
+; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    adda.l #12, %sp
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i64, i64* %a acquire, align 8
+  ret i64 %1
+}
+
+define i64 @atomic_load_i64_seq_cst(i64 *%a) nounwind {
+; NO-ATOMIC-LABEL: atomic_load_i64_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    move.l #5, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_load_i64_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #12, %sp
+; ATOMIC-NEXT:    move.l #5, (4,%sp)
+; ATOMIC-NEXT:    move.l (16,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_load_8@PLT
+; ATOMIC-NEXT:    adda.l #12, %sp
+; ATOMIC-NEXT:    rts
+  %1 = load atomic i64, i64* %a seq_cst, align 8
+  ret i64 %1
+}
+
+define void @atomic_store_i8_unordered(i8 *%a, i8 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i8_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.b (11,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i8_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.b (11,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i8 %val, i8* %a unordered, align 1
+  ret void
+}
+
+define void @atomic_store_i8_monotonic(i8 *%a, i8 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i8_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.b (11,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i8_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.b (11,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i8 %val, i8* %a monotonic, align 1
+  ret void
+}
+
+define void @atomic_store_i8_release(i8 *%a, i8 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i8_release:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.b (11,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i8_release:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.b (11,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i8 %val, i8* %a release, align 1
+  ret void
+}
+
+define void @atomic_store_i8_seq_cst(i8 *%a, i8 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i8_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.b (11,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.b %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i8_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.b (11,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.b %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i8 %val, i8* %a seq_cst, align 1
+  ret void
+}
+
+define void @atomic_store_i16_unordered(i16 *%a, i16 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i16_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.w (10,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i16_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.w (10,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i16 %val, i16* %a unordered, align 2
+  ret void
+}
+
+define void @atomic_store_i16_monotonic(i16 *%a, i16 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i16_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.w (10,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i16_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.w (10,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i16 %val, i16* %a monotonic, align 2
+  ret void
+}
+
+define void @atomic_store_i16_release(i16 *%a, i16 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i16_release:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.w (10,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i16_release:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.w (10,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i16 %val, i16* %a release, align 2
+  ret void
+}
+
+define void @atomic_store_i16_seq_cst(i16 *%a, i16 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i16_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.w (10,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.w %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i16_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.w (10,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.w %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i16 %val, i16* %a seq_cst, align 2
+  ret void
+}
+
+define void @atomic_store_i32_unordered(i32 *%a, i32 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i32_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (8,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i32_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (8,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i32 %val, i32* %a unordered, align 4
+  ret void
+}
+
+define void @atomic_store_i32_monotonic(i32 *%a, i32 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i32_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (8,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i32_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (8,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i32 %val, i32* %a monotonic, align 4
+  ret void
+}
+
+define void @atomic_store_i32_release(i32 *%a, i32 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i32_release:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (8,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i32_release:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (8,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i32 %val, i32* %a release, align 4
+  ret void
+}
+
+define void @atomic_store_i32_seq_cst(i32 *%a, i32 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i32_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    move.l (8,%sp), %d0
+; NO-ATOMIC-NEXT:    move.l (4,%sp), %a0
+; NO-ATOMIC-NEXT:    move.l %d0, (%a0)
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i32_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    move.l (8,%sp), %d0
+; ATOMIC-NEXT:    move.l (4,%sp), %a0
+; ATOMIC-NEXT:    move.l %d0, (%a0)
+; ATOMIC-NEXT:    rts
+  store atomic i32 %val, i32* %a seq_cst, align 4
+  ret void
+}
+
+define void @atomic_store_i64_unordered(i64 *%a, i64 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i64_unordered:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #20, %sp
+; NO-ATOMIC-NEXT:    move.l #0, (12,%sp)
+; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #20, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i64_unordered:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #20, %sp
+; ATOMIC-NEXT:    move.l #0, (12,%sp)
+; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    adda.l #20, %sp
+; ATOMIC-NEXT:    rts
+  store atomic i64 %val, i64* %a unordered, align 8
+  ret void
+}
+
+define void @atomic_store_i64_monotonic(i64 *%a, i64 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i64_monotonic:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #20, %sp
+; NO-ATOMIC-NEXT:    move.l #0, (12,%sp)
+; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #20, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i64_monotonic:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #20, %sp
+; ATOMIC-NEXT:    move.l #0, (12,%sp)
+; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    adda.l #20, %sp
+; ATOMIC-NEXT:    rts
+  store atomic i64 %val, i64* %a monotonic, align 8
+  ret void
+}
+
+define void @atomic_store_i64_release(i64 *%a, i64 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i64_release:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #20, %sp
+; NO-ATOMIC-NEXT:    move.l #3, (12,%sp)
+; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #20, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i64_release:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #20, %sp
+; ATOMIC-NEXT:    move.l #3, (12,%sp)
+; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    adda.l #20, %sp
+; ATOMIC-NEXT:    rts
+  store atomic i64 %val, i64* %a release, align 8
+  ret void
+}
+
+define void @atomic_store_i64_seq_cst(i64 *%a, i64 %val) nounwind {
+; NO-ATOMIC-LABEL: atomic_store_i64_seq_cst:
+; NO-ATOMIC:       ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #20, %sp
+; NO-ATOMIC-NEXT:    move.l #5, (12,%sp)
+; NO-ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #20, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomic_store_i64_seq_cst:
+; ATOMIC:       ; %bb.0:
+; ATOMIC-NEXT:    suba.l #20, %sp
+; ATOMIC-NEXT:    move.l #5, (12,%sp)
+; ATOMIC-NEXT:    move.l (32,%sp), (8,%sp)
+; ATOMIC-NEXT:    move.l (28,%sp), (4,%sp)
+; ATOMIC-NEXT:    move.l (24,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_store_8@PLT
+; ATOMIC-NEXT:    adda.l #20, %sp
+; ATOMIC-NEXT:    rts
+  store atomic i64 %val, i64* %a seq_cst, align 8
+  ret void
+}

diff  --git a/llvm/test/CodeGen/M68k/Atomics/rmw.ll b/llvm/test/CodeGen/M68k/Atomics/rmw.ll
new file mode 100644
index 0000000000000..1ab8fac6486f6
--- /dev/null
+++ b/llvm/test/CodeGen/M68k/Atomics/rmw.ll
@@ -0,0 +1,508 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68000 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68010 | FileCheck %s --check-prefix=NO-ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68020 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68030 | FileCheck %s --check-prefix=ATOMIC
+; RUN: llc %s -o - -mtriple=m68k -mcpu=M68040 | FileCheck %s --check-prefix=ATOMIC
+
+define i8 @atomicrmw_add_i8(i8 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_add_i8:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.b (19,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #255, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_add_1@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_add_i8:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.b (15,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d2
+; ATOMIC-NEXT:    move.b %d2, %d0
+; ATOMIC-NEXT:  .LBB0_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.b %d2, %d3
+; ATOMIC-NEXT:    add.b %d1, %d3
+; ATOMIC-NEXT:    cas.b %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.b %d0, %d3
+; ATOMIC-NEXT:    sub.b %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.b %d0, %d2
+; ATOMIC-NEXT:    bne .LBB0_1
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw add ptr %ptr, i8 %val monotonic
+  ret i8 %old
+}
+
+define i16 @atomicrmw_sub_i16(i16 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_sub_i16:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.w (18,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #65535, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_sub_2@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_sub_i16:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.w (14,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d2
+; ATOMIC-NEXT:    move.w %d2, %d0
+; ATOMIC-NEXT:  .LBB1_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.w %d2, %d3
+; ATOMIC-NEXT:    sub.w %d1, %d3
+; ATOMIC-NEXT:    cas.w %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.w %d0, %d3
+; ATOMIC-NEXT:    sub.w %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.w %d0, %d2
+; ATOMIC-NEXT:    bne .LBB1_1
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw sub ptr %ptr, i16 %val acquire
+  ret i16 %old
+}
+
+define i32 @atomicrmw_and_i32(i32 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_and_i32:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_and_4@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_and_i32:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.l (12,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.l (%a0), %d2
+; ATOMIC-NEXT:    move.l %d2, %d0
+; ATOMIC-NEXT:  .LBB2_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.l %d2, %d3
+; ATOMIC-NEXT:    and.l %d1, %d3
+; ATOMIC-NEXT:    cas.l %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.l %d0, %d3
+; ATOMIC-NEXT:    sub.l %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.l %d0, %d2
+; ATOMIC-NEXT:    bne .LBB2_1
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw and ptr %ptr, i32 %val seq_cst
+  ret i32 %old
+}
+
+define i64 @atomicrmw_xor_i64(i64 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_xor_i64:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #20, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -24
+; NO-ATOMIC-NEXT:    move.l #3, (12,%sp)
+; NO-ATOMIC-NEXT:    move.l (28,%sp), (8,%sp)
+; NO-ATOMIC-NEXT:    move.l (24,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (32,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_fetch_xor_8@PLT
+; NO-ATOMIC-NEXT:    adda.l #20, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_xor_i64:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #20, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -24
+; ATOMIC-NEXT:    move.l #3, (12,%sp)
+; ATOMIC-NEXT:    move.l (28,%sp), (8,%sp)
+; ATOMIC-NEXT:    move.l (24,%sp), (4,%sp)
+; ATOMIC-NEXT:    move.l (32,%sp), (%sp)
+; ATOMIC-NEXT:    jsr __atomic_fetch_xor_8@PLT
+; ATOMIC-NEXT:    adda.l #20, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw xor ptr %ptr, i64 %val release
+  ret i64 %old
+}
+
+define i8 @atomicrmw_or_i8(i8 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_or_i8:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.b (19,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #255, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_or_1@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_or_i8:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.b (15,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d2
+; ATOMIC-NEXT:    move.b %d2, %d0
+; ATOMIC-NEXT:  .LBB4_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.b %d2, %d3
+; ATOMIC-NEXT:    or.b %d1, %d3
+; ATOMIC-NEXT:    cas.b %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.b %d0, %d3
+; ATOMIC-NEXT:    sub.b %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.b %d0, %d2
+; ATOMIC-NEXT:    bne .LBB4_1
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw or ptr %ptr, i8 %val monotonic
+  ret i8 %old
+}
+
+define i16 @atmoicrmw_nand_i16(i16 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atmoicrmw_nand_i16:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    movem.l %d2, (8,%sp) ; 8-byte Folded Spill
+; NO-ATOMIC-NEXT:    move.w (18,%sp), %d2
+; NO-ATOMIC-NEXT:    move.l %d2, %d0
+; NO-ATOMIC-NEXT:    and.l #65535, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_nand_2@PLT
+; NO-ATOMIC-NEXT:    move.w %d2, %d0
+; NO-ATOMIC-NEXT:    movem.l (8,%sp), %d2 ; 8-byte Folded Reload
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atmoicrmw_nand_i16:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.w (14,%sp), %d0
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d2
+; ATOMIC-NEXT:    move.w %d2, %d1
+; ATOMIC-NEXT:  .LBB5_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.w %d2, %d3
+; ATOMIC-NEXT:    and.w %d0, %d3
+; ATOMIC-NEXT:    eori.w #-1, %d3
+; ATOMIC-NEXT:    cas.w %d1, %d3, (%a0)
+; ATOMIC-NEXT:    move.w %d1, %d3
+; ATOMIC-NEXT:    sub.w %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.w %d1, %d2
+; ATOMIC-NEXT:    bne .LBB5_1
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw nand ptr %ptr, i16 %val seq_cst
+  ret i16 %val
+}
+
+define i32 @atomicrmw_min_i32(i32 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_min_i32:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.l (16,%sp), (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_min_4@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_min_i32:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.l (12,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.l (%a0), %d2
+; ATOMIC-NEXT:    bra .LBB6_1
+; ATOMIC-NEXT:  .LBB6_3: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB6_1 Depth=1
+; ATOMIC-NEXT:    move.l %d2, %d0
+; ATOMIC-NEXT:    cas.l %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.l %d0, %d3
+; ATOMIC-NEXT:    sub.l %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.l %d0, %d2
+; ATOMIC-NEXT:    beq .LBB6_4
+; ATOMIC-NEXT:  .LBB6_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.l %d2, %d0
+; ATOMIC-NEXT:    sub.l %d1, %d0
+; ATOMIC-NEXT:    move.l %d2, %d3
+; ATOMIC-NEXT:    ble .LBB6_3
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB6_1 Depth=1
+; ATOMIC-NEXT:    move.l %d1, %d3
+; ATOMIC-NEXT:    bra .LBB6_3
+; ATOMIC-NEXT:  .LBB6_4: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw min ptr %ptr, i32 %val acquire
+  ret i32 %old
+}
+
+define i64 @atomicrmw_max_i64(i64 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_max_i64:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #52, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -56
+; NO-ATOMIC-NEXT:    movem.l %d2-%d4/%a2-%a3, (32,%sp) ; 24-byte Folded Spill
+; NO-ATOMIC-NEXT:    move.l (60,%sp), %d3
+; NO-ATOMIC-NEXT:    move.l (56,%sp), %d4
+; NO-ATOMIC-NEXT:    move.l (64,%sp), %a2
+; NO-ATOMIC-NEXT:    move.l (4,%a2), %d1
+; NO-ATOMIC-NEXT:    move.l (%a2), %d0
+; NO-ATOMIC-NEXT:    lea (24,%sp), %a3
+; NO-ATOMIC-NEXT:    bra .LBB7_1
+; NO-ATOMIC-NEXT:  .LBB7_3: ; %atomicrmw.start
+; NO-ATOMIC-NEXT:    ; in Loop: Header=BB7_1 Depth=1
+; NO-ATOMIC-NEXT:    move.l %d1, (12,%sp)
+; NO-ATOMIC-NEXT:    move.l %d0, (8,%sp)
+; NO-ATOMIC-NEXT:    move.l #5, (20,%sp)
+; NO-ATOMIC-NEXT:    move.l #5, (16,%sp)
+; NO-ATOMIC-NEXT:    jsr __atomic_compare_exchange_8@PLT
+; NO-ATOMIC-NEXT:    move.b %d0, %d2
+; NO-ATOMIC-NEXT:    move.l (28,%sp), %d1
+; NO-ATOMIC-NEXT:    move.l (24,%sp), %d0
+; NO-ATOMIC-NEXT:    cmpi.b #0, %d2
+; NO-ATOMIC-NEXT:    bne .LBB7_4
+; NO-ATOMIC-NEXT:  .LBB7_1: ; %atomicrmw.start
+; NO-ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; NO-ATOMIC-NEXT:    move.l %d0, (24,%sp)
+; NO-ATOMIC-NEXT:    move.l %d1, (28,%sp)
+; NO-ATOMIC-NEXT:    move.l %a2, (%sp)
+; NO-ATOMIC-NEXT:    move.l %a3, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l %d3, %d2
+; NO-ATOMIC-NEXT:    sub.l %d1, %d2
+; NO-ATOMIC-NEXT:    move.l %d4, %d2
+; NO-ATOMIC-NEXT:    subx.l %d0, %d2
+; NO-ATOMIC-NEXT:    slt %d2
+; NO-ATOMIC-NEXT:    cmpi.b #0, %d2
+; NO-ATOMIC-NEXT:    bne .LBB7_3
+; NO-ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.start
+; NO-ATOMIC-NEXT:    ; in Loop: Header=BB7_1 Depth=1
+; NO-ATOMIC-NEXT:    move.l %d3, %d1
+; NO-ATOMIC-NEXT:    move.l %d4, %d0
+; NO-ATOMIC-NEXT:    bra .LBB7_3
+; NO-ATOMIC-NEXT:  .LBB7_4: ; %atomicrmw.end
+; NO-ATOMIC-NEXT:    movem.l (32,%sp), %d2-%d4/%a2-%a3 ; 24-byte Folded Reload
+; NO-ATOMIC-NEXT:    adda.l #52, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_max_i64:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #52, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -56
+; ATOMIC-NEXT:    movem.l %d2-%d4/%a2-%a3, (32,%sp) ; 24-byte Folded Spill
+; ATOMIC-NEXT:    move.l (60,%sp), %d3
+; ATOMIC-NEXT:    move.l (56,%sp), %d4
+; ATOMIC-NEXT:    move.l (64,%sp), %a2
+; ATOMIC-NEXT:    move.l (4,%a2), %d1
+; ATOMIC-NEXT:    move.l (%a2), %d0
+; ATOMIC-NEXT:    lea (24,%sp), %a3
+; ATOMIC-NEXT:    bra .LBB7_1
+; ATOMIC-NEXT:  .LBB7_3: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB7_1 Depth=1
+; ATOMIC-NEXT:    move.l %d1, (12,%sp)
+; ATOMIC-NEXT:    move.l %d0, (8,%sp)
+; ATOMIC-NEXT:    move.l #5, (20,%sp)
+; ATOMIC-NEXT:    move.l #5, (16,%sp)
+; ATOMIC-NEXT:    jsr __atomic_compare_exchange_8@PLT
+; ATOMIC-NEXT:    move.b %d0, %d2
+; ATOMIC-NEXT:    move.l (28,%sp), %d1
+; ATOMIC-NEXT:    move.l (24,%sp), %d0
+; ATOMIC-NEXT:    cmpi.b #0, %d2
+; ATOMIC-NEXT:    bne .LBB7_4
+; ATOMIC-NEXT:  .LBB7_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.l %d0, (24,%sp)
+; ATOMIC-NEXT:    move.l %d1, (28,%sp)
+; ATOMIC-NEXT:    move.l %a2, (%sp)
+; ATOMIC-NEXT:    move.l %a3, (4,%sp)
+; ATOMIC-NEXT:    move.l %d3, %d2
+; ATOMIC-NEXT:    sub.l %d1, %d2
+; ATOMIC-NEXT:    move.l %d4, %d2
+; ATOMIC-NEXT:    subx.l %d0, %d2
+; ATOMIC-NEXT:    slt %d2
+; ATOMIC-NEXT:    cmpi.b #0, %d2
+; ATOMIC-NEXT:    bne .LBB7_3
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB7_1 Depth=1
+; ATOMIC-NEXT:    move.l %d3, %d1
+; ATOMIC-NEXT:    move.l %d4, %d0
+; ATOMIC-NEXT:    bra .LBB7_3
+; ATOMIC-NEXT:  .LBB7_4: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (32,%sp), %d2-%d4/%a2-%a3 ; 24-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #52, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw max ptr %ptr, i64 %val seq_cst
+  ret i64 %old
+}
+
+define i8 @atomicrmw_i8_umin(i8 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_i8_umin:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.b (19,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #255, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_umin_1@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_i8_umin:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.b (15,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.b (%a0), %d2
+; ATOMIC-NEXT:    bra .LBB8_1
+; ATOMIC-NEXT:  .LBB8_3: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB8_1 Depth=1
+; ATOMIC-NEXT:    move.b %d2, %d0
+; ATOMIC-NEXT:    cas.b %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.b %d0, %d3
+; ATOMIC-NEXT:    sub.b %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.b %d0, %d2
+; ATOMIC-NEXT:    beq .LBB8_4
+; ATOMIC-NEXT:  .LBB8_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.b %d2, %d0
+; ATOMIC-NEXT:    sub.b %d1, %d0
+; ATOMIC-NEXT:    move.b %d2, %d3
+; ATOMIC-NEXT:    bls .LBB8_3
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB8_1 Depth=1
+; ATOMIC-NEXT:    move.b %d1, %d3
+; ATOMIC-NEXT:    bra .LBB8_3
+; ATOMIC-NEXT:  .LBB8_4: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw umin ptr %ptr, i8 %val release
+  ret i8 %old
+}
+
+define i16 @atomicrmw_umax_i16(i16 %val, ptr %ptr) {
+; NO-ATOMIC-LABEL: atomicrmw_umax_i16:
+; NO-ATOMIC:         .cfi_startproc
+; NO-ATOMIC-NEXT:  ; %bb.0:
+; NO-ATOMIC-NEXT:    suba.l #12, %sp
+; NO-ATOMIC-NEXT:    .cfi_def_cfa_offset -16
+; NO-ATOMIC-NEXT:    move.w (18,%sp), %d0
+; NO-ATOMIC-NEXT:    and.l #65535, %d0
+; NO-ATOMIC-NEXT:    move.l %d0, (4,%sp)
+; NO-ATOMIC-NEXT:    move.l (20,%sp), (%sp)
+; NO-ATOMIC-NEXT:    jsr __sync_fetch_and_umax_2@PLT
+; NO-ATOMIC-NEXT:    adda.l #12, %sp
+; NO-ATOMIC-NEXT:    rts
+;
+; ATOMIC-LABEL: atomicrmw_umax_i16:
+; ATOMIC:         .cfi_startproc
+; ATOMIC-NEXT:  ; %bb.0:
+; ATOMIC-NEXT:    suba.l #8, %sp
+; ATOMIC-NEXT:    .cfi_def_cfa_offset -12
+; ATOMIC-NEXT:    movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; ATOMIC-NEXT:    move.w (14,%sp), %d1
+; ATOMIC-NEXT:    move.l (16,%sp), %a0
+; ATOMIC-NEXT:    move.w (%a0), %d2
+; ATOMIC-NEXT:    bra .LBB9_1
+; ATOMIC-NEXT:  .LBB9_3: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB9_1 Depth=1
+; ATOMIC-NEXT:    move.w %d2, %d0
+; ATOMIC-NEXT:    cas.w %d0, %d3, (%a0)
+; ATOMIC-NEXT:    move.w %d0, %d3
+; ATOMIC-NEXT:    sub.w %d2, %d3
+; ATOMIC-NEXT:    seq %d2
+; ATOMIC-NEXT:    sub.b #1, %d2
+; ATOMIC-NEXT:    move.w %d0, %d2
+; ATOMIC-NEXT:    beq .LBB9_4
+; ATOMIC-NEXT:  .LBB9_1: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; =>This Inner Loop Header: Depth=1
+; ATOMIC-NEXT:    move.w %d2, %d0
+; ATOMIC-NEXT:    sub.w %d1, %d0
+; ATOMIC-NEXT:    move.w %d2, %d3
+; ATOMIC-NEXT:    bhi .LBB9_3
+; ATOMIC-NEXT:  ; %bb.2: ; %atomicrmw.start
+; ATOMIC-NEXT:    ; in Loop: Header=BB9_1 Depth=1
+; ATOMIC-NEXT:    move.w %d1, %d3
+; ATOMIC-NEXT:    bra .LBB9_3
+; ATOMIC-NEXT:  .LBB9_4: ; %atomicrmw.end
+; ATOMIC-NEXT:    movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; ATOMIC-NEXT:    adda.l #8, %sp
+; ATOMIC-NEXT:    rts
+  %old = atomicrmw umax ptr %ptr, i16 %val seq_cst
+  ret i16 %old
+}

diff  --git a/llvm/test/CodeGen/M68k/pipeline.ll b/llvm/test/CodeGen/M68k/pipeline.ll
index 671ae74132f08..c2ca336442153 100644
--- a/llvm/test/CodeGen/M68k/pipeline.ll
+++ b/llvm/test/CodeGen/M68k/pipeline.ll
@@ -3,6 +3,7 @@
 ; CHECK-NEXT:    Pre-ISel Intrinsic Lowering
 ; CHECK-NEXT:    FunctionPass Manager
 ; CHECK-NEXT:      Expand large div/rem
+; CHECK-NEXT:      Expand Atomic instructions
 ; CHECK-NEXT:      Module Verifier
 ; CHECK-NEXT:      Dominator Tree Construction
 ; CHECK-NEXT:      Basic Alias Analysis (stateless AA impl)
@@ -131,4 +132,4 @@
 ; CHECK-NEXT:      Lazy Machine Block Frequency Analysis
 ; CHECK-NEXT:      Machine Optimization Remark Emitter
 ; CHECK-NEXT:      M68k Assembly Printer
-; CHECK-NEXT:     Free MachineFunction
\ No newline at end of file
+; CHECK-NEXT:      Free MachineFunction

diff  --git a/llvm/test/MC/Disassembler/M68k/atomics.txt b/llvm/test/MC/Disassembler/M68k/atomics.txt
new file mode 100644
index 0000000000000..bec3436c90e6a
--- /dev/null
+++ b/llvm/test/MC/Disassembler/M68k/atomics.txt
@@ -0,0 +1,10 @@
+# RUN: llvm-mc -disassemble %s -triple=m68k | FileCheck %s
+
+# CHECK: cas.b %d3, %d2, (%a2)
+0x0a 0xd2 0x00 0x83
+
+# CHECK: cas.w %d4, %d5, (%a3)
+0x0c 0xd3 0x01 0x44
+
+# CHECK: cas.l %d6, %d7, (%a4)
+0x0e 0xd4 0x01 0xc6

diff  --git a/llvm/test/MC/M68k/Atomics/cas.s b/llvm/test/MC/M68k/Atomics/cas.s
new file mode 100644
index 0000000000000..99e31d3f428b4
--- /dev/null
+++ b/llvm/test/MC/M68k/Atomics/cas.s
@@ -0,0 +1,13 @@
+; RUN: llvm-mc -show-encoding -triple=m68k %s | FileCheck %s
+
+; CHECK: cas.b %d3, %d2, (%a2)
+; CHECK-SAME: ; encoding: [0x0a,0xd2,0x00,0x83]
+cas.b %d3, %d2, (%a2)
+
+; CHECK: cas.w %d4, %d5, (%a3)
+; CHECK-SAME: ; encoding: [0x0c,0xd3,0x01,0x44]
+cas.w %d4, %d5, (%a3)
+
+; CHECK: cas.l %d6, %d7, (%a4)
+; CHECK-SAME: ; encoding: [0x0e,0xd4,0x01,0xc6]
+cas.l %d6, %d7, (%a4)


        


More information about the llvm-commits mailing list