[llvm] 5ac19e0 - [M68k](5/8) Target-specific lowering

Min-Yih Hsu via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 8 12:33:51 PST 2021


Author: Min-Yih Hsu
Date: 2021-03-08T12:30:57-08:00
New Revision: 5ac19e0acf86ec2b080aaee6dd9226d64778ce83

URL: https://github.com/llvm/llvm-project/commit/5ac19e0acf86ec2b080aaee6dd9226d64778ce83
DIFF: https://github.com/llvm/llvm-project/commit/5ac19e0acf86ec2b080aaee6dd9226d64778ce83.diff

LOG: [M68k](5/8) Target-specific lowering

 - TargetMachine implementation for M68k
 - ISel, ISched for M68k
 - Other lowering (e.g. FrameLowering)
 - AsmPrinter

Authors: myhsu, m4yers, glaubitz

Differential Revision: https://reviews.llvm.org/D88391

Added: 
    llvm/lib/Target/M68k/M68k.h
    llvm/lib/Target/M68k/M68kAsmPrinter.cpp
    llvm/lib/Target/M68k/M68kAsmPrinter.h
    llvm/lib/Target/M68k/M68kCallingConv.h
    llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
    llvm/lib/Target/M68k/M68kExpandPseudo.cpp
    llvm/lib/Target/M68k/M68kFrameLowering.cpp
    llvm/lib/Target/M68k/M68kFrameLowering.h
    llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
    llvm/lib/Target/M68k/M68kISelLowering.cpp
    llvm/lib/Target/M68k/M68kISelLowering.h
    llvm/lib/Target/M68k/M68kInstrBuilder.h
    llvm/lib/Target/M68k/M68kInstrInfo.cpp
    llvm/lib/Target/M68k/M68kInstrInfo.h
    llvm/lib/Target/M68k/M68kMCInstLower.cpp
    llvm/lib/Target/M68k/M68kMCInstLower.h
    llvm/lib/Target/M68k/M68kMachineFunction.cpp
    llvm/lib/Target/M68k/M68kMachineFunction.h
    llvm/lib/Target/M68k/M68kRegisterInfo.cpp
    llvm/lib/Target/M68k/M68kRegisterInfo.h
    llvm/lib/Target/M68k/M68kSubtarget.cpp
    llvm/lib/Target/M68k/M68kSubtarget.h
    llvm/lib/Target/M68k/M68kTargetMachine.h
    llvm/lib/Target/M68k/M68kTargetObjectFile.cpp
    llvm/lib/Target/M68k/M68kTargetObjectFile.h

Modified: 
    llvm/lib/Target/M68k/CMakeLists.txt
    llvm/lib/Target/M68k/M68kTargetMachine.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/M68k/CMakeLists.txt b/llvm/lib/Target/M68k/CMakeLists.txt
index 1bd78445f572..b894096b38e9 100644
--- a/llvm/lib/Target/M68k/CMakeLists.txt
+++ b/llvm/lib/Target/M68k/CMakeLists.txt
@@ -14,7 +14,19 @@ tablegen(LLVM M68kGenAsmWriter.inc        -gen-asm-writer)
 add_public_tablegen_target(M68kCommonTableGen)
 
 add_llvm_target(M68kCodeGen
+  M68kAsmPrinter.cpp
+  M68kCollapseMOVEMPass.cpp
+  M68kExpandPseudo.cpp
+  M68kFrameLowering.cpp
+  M68kInstrInfo.cpp
+  M68kISelLowering.cpp
+  M68kISelDAGToDAG.cpp
+  M68kMachineFunction.cpp
+  M68kMCInstLower.cpp
+  M68kRegisterInfo.cpp
+  M68kSubtarget.cpp
   M68kTargetMachine.cpp
+  M68kTargetObjectFile.cpp
 
   LINK_COMPONENTS
   Analysis

diff  --git a/llvm/lib/Target/M68k/M68k.h b/llvm/lib/Target/M68k/M68k.h
new file mode 100644
index 000000000000..98e508836323
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68k.h
@@ -0,0 +1,50 @@
+//===- M68k.h - Top-level interface for M68k representation -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the entry points for global functions defined in the
+/// M68k target library, as used by the LLVM JIT.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68K_H
+#define LLVM_LIB_TARGET_M68K_M68K_H
+
+namespace llvm {
+
+class FunctionPass;
+class M68kTargetMachine;
+
+/// This pass converts a legalized DAG into an M68k-specific DAG, ready for
+/// instruction scheduling.
+FunctionPass *createM68kISelDag(M68kTargetMachine &TM);
+
+/// Return a Machine IR pass that expands M68k-specific pseudo
+/// instructions into a sequence of actual instructions. This pass
+/// must run after prologue/epilogue insertion and before lowering
+/// the MachineInstr to MC.
+FunctionPass *createM68kExpandPseudoPass();
+
+/// This pass initializes a global base register for PIC on M68k.
+FunctionPass *createM68kGlobalBaseRegPass();
+
+/// Finds sequential MOVEM instructions and collapses them into a single one. This
+/// pass has to be run after all pseudo expansions and prologue/epilogue
+/// emission so that all possible MOVEM are already in place.
+FunctionPass *createM68kCollapseMOVEMPass();
+
+/// Finds MOVE instructions before any conditional branch instruction and
+/// replaces them with MOVEM instructions. Motorola's MOVEs trash the (V,C) flags
+/// register which prevents branch from taking the correct route. This pass
+/// has to be run after all pseudo expansions and prologue/epilogue emission
+/// so that all possible MOVEs are present.
+FunctionPass *createM68kConvertMOVToMOVMPass();
+
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kAsmPrinter.cpp b/llvm/lib/Target/M68k/M68kAsmPrinter.cpp
new file mode 100644
index 000000000000..afb458c4003c
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kAsmPrinter.cpp
@@ -0,0 +1,69 @@
+//===----- M68kAsmPrinter.cpp - M68k LLVM Assembly Printer -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains a printer that converts from our internal representation
+/// of machine-dependent LLVM code to GAS-format M68k assembly language.
+///
+//===----------------------------------------------------------------------===//
+
+// TODO Conform to Motorola ASM syntax
+
+#include "M68kAsmPrinter.h"
+
+#include "M68k.h"
+#include "M68kMachineFunction.h"
+
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "m68k-asm-printer"
+
+bool M68kAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
+  MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  MCInstLowering = std::make_unique<M68kMCInstLower>(MF, *this);
+  AsmPrinter::runOnMachineFunction(MF);
+  return true;
+}
+
+void M68kAsmPrinter::emitInstruction(const MachineInstr *MI) {
+  switch (MI->getOpcode()) {
+  default: {
+    if (MI->isPseudo()) {
+      LLVM_DEBUG(dbgs() << "Pseudo opcode(" << MI->getOpcode()
+                        << ") found in EmitInstruction()\n");
+      llvm_unreachable("Cannot proceed");
+    }
+    break;
+  }
+  case M68k::TAILJMPj:
+  case M68k::TAILJMPq:
+    // Lower these as normal, but add some comments.
+    OutStreamer->AddComment("TAILCALL");
+    break;
+  }
+
+  MCInst TmpInst0;
+  MCInstLowering->Lower(MI, TmpInst0);
+  OutStreamer->emitInstruction(TmpInst0, getSubtargetInfo());
+}
+
+void M68kAsmPrinter::emitFunctionBodyStart() {}
+
+void M68kAsmPrinter::emitFunctionBodyEnd() {}
+
+void M68kAsmPrinter::emitStartOfAsmFile(Module &M) {
+  OutStreamer->emitSyntaxDirective();
+}
+
+void M68kAsmPrinter::emitEndOfAsmFile(Module &M) {}
+
+extern "C" void LLVMInitializeM68kAsmPrinter() {
+  RegisterAsmPrinter<M68kAsmPrinter> X(TheM68kTarget);
+}

diff  --git a/llvm/lib/Target/M68k/M68kAsmPrinter.h b/llvm/lib/Target/M68k/M68kAsmPrinter.h
new file mode 100644
index 000000000000..f17a90fd475d
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kAsmPrinter.h
@@ -0,0 +1,64 @@
+//===----- M68kAsmPrinter.h - M68k LLVM Assembly Printer -------- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains M68k assembler printer declarations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KASMPRINTER_H
+#define LLVM_LIB_TARGET_M68K_M68KASMPRINTER_H
+
+#include "M68kMCInstLower.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetMachine.h"
+#include <memory>
+#include <utility>
+
+namespace llvm {
+class MCStreamer;
+class MachineInstr;
+class MachineBasicBlock;
+class Module;
+class raw_ostream;
+
+class M68kSubtarget;
+class M68kMachineFunctionInfo;
+
+class LLVM_LIBRARY_VISIBILITY M68kAsmPrinter : public AsmPrinter {
+
+  void EmitInstrWithMacroNoAT(const MachineInstr *MI);
+
+public:
+  const M68kSubtarget *Subtarget;
+  const M68kMachineFunctionInfo *MMFI;
+  std::unique_ptr<M68kMCInstLower> MCInstLowering;
+
+  explicit M68kAsmPrinter(TargetMachine &TM,
+                          std::unique_ptr<MCStreamer> Streamer)
+      : AsmPrinter(TM, std::move(Streamer)) {
+    Subtarget = static_cast<M68kTargetMachine &>(TM).getSubtargetImpl();
+  }
+
+  StringRef getPassName() const override { return "M68k Assembly Printer"; }
+
+  virtual bool runOnMachineFunction(MachineFunction &MF) override;
+
+  void emitInstruction(const MachineInstr *MI) override;
+  void emitFunctionBodyStart() override;
+  void emitFunctionBodyEnd() override;
+  void emitStartOfAsmFile(Module &M) override;
+  void emitEndOfAsmFile(Module &M) override;
+};
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kCallingConv.h b/llvm/lib/Target/M68k/M68kCallingConv.h
new file mode 100644
index 000000000000..18f72c95cedb
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kCallingConv.h
@@ -0,0 +1,77 @@
+//===-- M68kCallingConv.h - M68k Custom CC Routines ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the custom routines for the M68k Calling Convention
+/// that aren't done by tablegen.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KCALLINGCONV_H
+#define LLVM_LIB_TARGET_M68K_M68KCALLINGCONV_H
+
+#include "MCTargetDesc/M68kMCTargetDesc.h"
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+/// Custom state to propagate llvm type info to register CC assigner
+class M68kCCState : public CCState {
+public:
+  const llvm::Function &F;
+
+  M68kCCState(const llvm::Function &F, CallingConv::ID CC, bool IsVarArg,
+              MachineFunction &MF, SmallVectorImpl<CCValAssign> &Locs,
+              LLVMContext &C)
+      : CCState(CC, IsVarArg, MF, Locs, C), F(F) {}
+};
+
+/// NOTE this function is used to select registers for formal arguments and calls
+/// FIXME: Handling on pointer arguments is not complete
+inline bool CC_M68k_Any_AssignToReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+                                    CCValAssign::LocInfo &LocInfo,
+                                    ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+  M68kCCState CCInfo = static_cast<M68kCCState &>(State);
+
+  static const MCPhysReg DataRegList[] = {M68k::D0, M68k::D1, M68k::A0,
+                                          M68k::A1};
+
+  // Address registers have %a register priority
+  static const MCPhysReg AddrRegList[] = {
+      M68k::A0,
+      M68k::A1,
+      M68k::D0,
+      M68k::D1,
+  };
+
+  auto I = CCInfo.F.arg_begin();
+  int No = ValNo;
+  while (No > 0) {
+    No -= I->getType()->isIntegerTy(64) ? 2 : 1;
+    I++;
+  }
+
+  bool IsPtr = I != CCInfo.F.arg_end() && I->getType()->isPointerTy();
+
+  unsigned Reg =
+      IsPtr ? State.AllocateReg(AddrRegList) : State.AllocateReg(DataRegList);
+
+  if (Reg) {
+    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+    return true;
+  }
+
+  return false;
+}
+
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp b/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
new file mode 100644
index 000000000000..4149ae92ffe9
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kCollapseMOVEMPass.cpp
@@ -0,0 +1,307 @@
+//===----- M68kCollapseMOVEMPass.cpp - Expand MOVEM pass --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// `MOVEM` is an instruction that moves multiple registers at a time according to
+/// the given mask. Thus sometimes it's pretty expensive.
+/// This file contains a pass that collapses sequential MOVEM instructions into
+/// a single one.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68k.h"
+#include "M68kFrameLowering.h"
+#include "M68kInstrInfo.h"
+#include "M68kMachineFunction.h"
+#include "M68kSubtarget.h"
+
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/MathExtras.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "M68k-collapse-movem"
+
+namespace {
+
+enum UpdateType { Ascending, Descending, Intermixed };
+
+/// An abstraction of the MOVEM chain currently being processed
+class MOVEMState {
+  MachineBasicBlock::iterator Begin;
+  MachineBasicBlock::iterator End;
+
+  unsigned Base;
+
+  int Start;
+  int Stop;
+
+  unsigned Mask;
+
+  enum class AccessTy { None, Load, Store };
+  AccessTy Access;
+
+public:
+  MOVEMState()
+      : Begin(nullptr), End(nullptr), Base(0), Start(INT_MIN), Stop(INT_MAX),
+        Mask(0), Access(AccessTy::None) {}
+
+  void setBegin(MachineBasicBlock::iterator &MI) {
+    assert(Begin == nullptr);
+    Begin = MI;
+  }
+
+  void setEnd(MachineBasicBlock::iterator &MI) {
+    assert(End == nullptr);
+    End = MI;
+  }
+
+  bool hasBase() const { return Base != 0; }
+
+  unsigned getBase() const {
+    assert(Base);
+    return Base;
+  }
+
+  MachineBasicBlock::iterator begin() {
+    assert(Begin != nullptr);
+    return Begin;
+  }
+
+  MachineBasicBlock::iterator end() {
+    assert(End != nullptr);
+    return End;
+  }
+
+  unsigned getMask() const { return Mask; }
+
+  void setBase(int Value) {
+    assert(!hasBase());
+    Base = Value;
+  }
+
+  // You need to call this before Mask update
+  UpdateType classifyUpdateByMask(unsigned NewMask) const {
+    assert(NewMask && "Mask needs to select at least one register");
+
+    if (NewMask > Mask) {
+      return Ascending;
+    } else if (NewMask < Mask) {
+      return Descending;
+    }
+
+    return Intermixed;
+  }
+
+  bool update(int O, int M) {
+    UpdateType Type = classifyUpdateByMask(M);
+    if (Type == Intermixed)
+      return false;
+    if (Start == INT_MIN) {
+      Start = Stop = O;
+      updateMask(M);
+      return true;
+    } else if (Type == Descending && O == Start - 4) {
+      Start -= 4;
+      updateMask(M);
+      return true;
+    } else if (Type == Ascending && O == Stop + 4) {
+      Stop += 4;
+      updateMask(M);
+      return true;
+    }
+
+    return false;
+  }
+
+  int getFinalOffset() const {
+    assert(
+        Start != INT_MIN &&
+        "MOVEM in control mode should increment the address in each iteration");
+    return Start;
+  }
+
+  bool updateMask(unsigned Value) {
+    assert(isUInt<16>(Value) && "Mask must fit 16 bit");
+    assert(!(Value & Mask) &&
+           "This is weird, there should be no intersections");
+    Mask |= Value;
+    return true;
+  }
+
+  void setLoad() { Access = AccessTy::Load; }
+  void setStore() { Access = AccessTy::Store; }
+
+  bool isLoad() const { return Access == AccessTy::Load; }
+  bool isStore() const { return Access == AccessTy::Store; }
+};
+
+/// This Pass first walks through all the MOVEM instructions
+/// that are chained together and record each of the
+/// instruction's properties like register mask and data
+/// access type into a `MOVEMState` instance.
+/// Then we perform reduction / collapsing on this `MOVEMState`
+/// representation before creating a new `MOVEM` instruction
+/// based on the collapsed result, as well as removing
+/// redundant `MOVEM` instructions.
+class M68kCollapseMOVEM : public MachineFunctionPass {
+public:
+  static char ID;
+
+  const M68kSubtarget *STI;
+  const M68kInstrInfo *TII;
+  const M68kRegisterInfo *TRI;
+  const M68kMachineFunctionInfo *MFI;
+  const M68kFrameLowering *FL;
+
+  M68kCollapseMOVEM() : MachineFunctionPass(ID) {}
+
+  void Finish(MachineBasicBlock &MBB, MOVEMState &State) {
+    auto MI = State.begin();
+    auto End = State.end();
+    DebugLoc DL = MI->getDebugLoc();
+
+    // No need to delete then add a single instruction
+    if (std::next(MI) == End) {
+      State = MOVEMState();
+      return;
+    }
+
+    // Delete all the MOVEM instructions till the end
+    while (MI != End) {
+      auto Next = std::next(MI);
+      MBB.erase(MI);
+      MI = Next;
+    }
+
+    // Add a unified one
+    if (State.isLoad()) {
+      BuildMI(MBB, End, DL, TII->get(M68k::MOVM32mp))
+          .addImm(State.getMask())
+          .addImm(State.getFinalOffset())
+          .addReg(State.getBase());
+    } else {
+      BuildMI(MBB, End, DL, TII->get(M68k::MOVM32pm))
+          .addImm(State.getFinalOffset())
+          .addReg(State.getBase())
+          .addImm(State.getMask());
+    }
+
+    State = MOVEMState();
+  }
+
+  bool ProcessMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+                 MOVEMState &State, unsigned Mask, int Offset, unsigned Reg,
+                 bool IsStore = false) {
+    if (State.hasBase()) {
+      // If the current Type, Reg, Offset and Mask are in proper order then
+      // merge them into the state
+      MOVEMState Temp = State;
+      if (State.isStore() == IsStore && State.getBase() == Reg &&
+          State.update(Offset, Mask)) {
+        return true;
+        // Otherwise we Finish processing of the current MOVEM sequence and
+        // start a new one
+      } else {
+        State = Temp;
+        State.setEnd(MI);
+        Finish(MBB, State);
+        return ProcessMI(MBB, MI, State, Mask, Offset, Reg, IsStore);
+      }
+      // If this is the first instruction in the sequence then initialize the State
+    } else if (Reg == TRI->getStackRegister() ||
+               Reg == TRI->getBaseRegister() ||
+               Reg == TRI->getFrameRegister(*MBB.getParent())) {
+      State.setBegin(MI);
+      State.setBase(Reg);
+      State.update(Offset, Mask);
+      IsStore ? State.setStore() : State.setLoad();
+      return true;
+    }
+    return false;
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override {
+    STI = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
+    TII = STI->getInstrInfo();
+    TRI = STI->getRegisterInfo();
+    MFI = MF.getInfo<M68kMachineFunctionInfo>();
+    FL = STI->getFrameLowering();
+
+    bool Modified = false;
+
+    MOVEMState State;
+
+    unsigned Mask = 0;
+    unsigned Reg = 0;
+    int Offset = 0;
+
+    for (auto &MBB : MF) {
+      auto MI = MBB.begin(), E = MBB.end();
+      while (MI != E) {
+        // Processing might change current instruction, save next first
+        auto NMI = std::next(MI);
+        switch (MI->getOpcode()) {
+        default:
+          if (State.hasBase()) {
+            State.setEnd(MI);
+            Finish(MBB, State);
+            Modified = true;
+          }
+          break;
+        case M68k::MOVM32jm:
+          Mask = MI->getOperand(1).getImm();
+          Reg = MI->getOperand(0).getReg();
+          Offset = 0;
+          Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, true);
+          break;
+        case M68k::MOVM32pm:
+          Mask = MI->getOperand(2).getImm();
+          Reg = MI->getOperand(1).getReg();
+          Offset = MI->getOperand(0).getImm();
+          Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, true);
+          break;
+        case M68k::MOVM32mj:
+          Mask = MI->getOperand(0).getImm();
+          Reg = MI->getOperand(1).getReg();
+          Offset = 0;
+          Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, false);
+          break;
+        case M68k::MOVM32mp:
+          Mask = MI->getOperand(0).getImm();
+          Reg = MI->getOperand(2).getReg();
+          Offset = MI->getOperand(1).getImm();
+          Modified |= ProcessMI(MBB, MI, State, Mask, Offset, Reg, false);
+          break;
+        }
+        MI = NMI;
+      }
+
+      if (State.hasBase()) {
+        State.setEnd(MI);
+        Finish(MBB, State);
+      }
+    }
+
+    return Modified;
+  }
+
+  StringRef getPassName() const override { return "M68k MOVEM collapser pass"; }
+};
+
+char M68kCollapseMOVEM::ID = 0;
+} // anonymous namespace.
+
+/// Returns an instance of the MOVEM collapse pass.
+FunctionPass *llvm::createM68kCollapseMOVEMPass() {
+  return new M68kCollapseMOVEM();
+}

diff  --git a/llvm/lib/Target/M68k/M68kExpandPseudo.cpp b/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
new file mode 100644
index 000000000000..6a4aeaab518a
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kExpandPseudo.cpp
@@ -0,0 +1,320 @@
+//===--M68kExpandPseudo.cpp - Expand pseudo instructions ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains a pass that expands pseudo instructions into target
+/// instructions to allow proper scheduling, if-conversion, other late
+/// optimizations, or simply the encoding of the instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68k.h"
+#include "M68kFrameLowering.h"
+#include "M68kInstrInfo.h"
+#include "M68kMachineFunction.h"
+#include "M68kSubtarget.h"
+
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h" // For IDs of passes that are preserved.
+#include "llvm/IR/GlobalValue.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "M68k-expand-pseudos"
+
+namespace {
+class M68kExpandPseudo : public MachineFunctionPass {
+public:
+  static char ID;
+  M68kExpandPseudo() : MachineFunctionPass(ID) {}
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    AU.addPreservedID(MachineLoopInfoID);
+    AU.addPreservedID(MachineDominatorsID);
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  const M68kSubtarget *STI;
+  const M68kInstrInfo *TII;
+  const M68kRegisterInfo *TRI;
+  const M68kMachineFunctionInfo *MFI;
+  const M68kFrameLowering *FL;
+
+  bool runOnMachineFunction(MachineFunction &Fn) override;
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::NoVRegs);
+  }
+
+  StringRef getPassName() const override {
+    return "M68k pseudo instruction expansion pass";
+  }
+
+private:
+  bool ExpandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI);
+  bool ExpandMBB(MachineBasicBlock &MBB);
+};
+char M68kExpandPseudo::ID = 0;
+} // End anonymous namespace.
+
+/// If \p MBBI is a pseudo instruction, this method expands
+/// it to the corresponding (sequence of) actual instruction(s).
+/// \returns true if \p MBBI has been expanded.
+bool M68kExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MBBI) {
+  MachineInstr &MI = *MBBI;
+  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
+  unsigned Opcode = MI.getOpcode();
+  DebugLoc DL = MBBI->getDebugLoc();
+  /// TODO infer argument size to create less switch cases
+  switch (Opcode) {
+  default:
+    return false;
+
+  case M68k::MOVXd16d8:
+    return TII->ExpandMOVX_RR(MIB, MVT::i16, MVT::i8);
+  case M68k::MOVXd32d8:
+    return TII->ExpandMOVX_RR(MIB, MVT::i32, MVT::i8);
+  case M68k::MOVXd32d16:
+    return TII->ExpandMOVX_RR(MIB, MVT::i32, MVT::i16);
+
+  case M68k::MOVSXd16d8:
+    return TII->ExpandMOVSZX_RR(MIB, true, MVT::i16, MVT::i8);
+  case M68k::MOVSXd32d8:
+    return TII->ExpandMOVSZX_RR(MIB, true, MVT::i32, MVT::i8);
+  case M68k::MOVSXd32d16:
+    return TII->ExpandMOVSZX_RR(MIB, true, MVT::i32, MVT::i16);
+
+  case M68k::MOVZXd16d8:
+    return TII->ExpandMOVSZX_RR(MIB, false, MVT::i16, MVT::i8);
+  case M68k::MOVZXd32d8:
+    return TII->ExpandMOVSZX_RR(MIB, false, MVT::i32, MVT::i8);
+  case M68k::MOVZXd32d16:
+    return TII->ExpandMOVSZX_RR(MIB, false, MVT::i32, MVT::i16);
+
+  case M68k::MOVSXd16j8:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dj), MVT::i16,
+                                MVT::i8);
+  case M68k::MOVSXd32j8:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dj), MVT::i32,
+                                MVT::i8);
+  case M68k::MOVSXd32j16:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV16rj), MVT::i32,
+                                MVT::i16);
+
+  case M68k::MOVZXd16j8:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dj), MVT::i16,
+                                MVT::i8);
+  case M68k::MOVZXd32j8:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dj), MVT::i32,
+                                MVT::i8);
+  case M68k::MOVZXd32j16:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV16rj), MVT::i32,
+                                MVT::i16);
+
+  case M68k::MOVSXd16p8:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dp), MVT::i16,
+                                MVT::i8);
+  case M68k::MOVSXd32p8:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8dp), MVT::i32,
+                                MVT::i8);
+  case M68k::MOVSXd32p16:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV16rp), MVT::i32,
+                                MVT::i16);
+
+  case M68k::MOVZXd16p8:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dp), MVT::i16,
+                                MVT::i8);
+  case M68k::MOVZXd32p8:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8dp), MVT::i32,
+                                MVT::i8);
+  case M68k::MOVZXd32p16:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV16rp), MVT::i32,
+                                MVT::i16);
+
+  case M68k::MOVSXd16f8:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8df), MVT::i16,
+                                MVT::i8);
+  case M68k::MOVSXd32f8:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV8df), MVT::i32,
+                                MVT::i8);
+  case M68k::MOVSXd32f16:
+    return TII->ExpandMOVSZX_RM(MIB, true, TII->get(M68k::MOV16rf), MVT::i32,
+                                MVT::i16);
+
+  case M68k::MOVZXd16f8:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8df), MVT::i16,
+                                MVT::i8);
+  case M68k::MOVZXd32f8:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV8df), MVT::i32,
+                                MVT::i8);
+  case M68k::MOVZXd32f16:
+    return TII->ExpandMOVSZX_RM(MIB, false, TII->get(M68k::MOV16rf), MVT::i32,
+                                MVT::i16);
+
+  case M68k::MOV8cd:
+    return TII->ExpandCCR(MIB, /*IsToCCR=*/true);
+  case M68k::MOV8dc:
+    return TII->ExpandCCR(MIB, /*IsToCCR=*/false);
+
+  case M68k::MOVM8jm_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32jm), /*IsRM=*/false);
+  case M68k::MOVM16jm_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32jm), /*IsRM=*/false);
+  case M68k::MOVM32jm_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32jm), /*IsRM=*/false);
+
+  case M68k::MOVM8pm_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32pm), /*IsRM=*/false);
+  case M68k::MOVM16pm_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32pm), /*IsRM=*/false);
+  case M68k::MOVM32pm_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32pm), /*IsRM=*/false);
+
+  case M68k::MOVM8mj_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mj), /*IsRM=*/true);
+  case M68k::MOVM16mj_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mj), /*IsRM=*/true);
+  case M68k::MOVM32mj_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mj), /*IsRM=*/true);
+
+  case M68k::MOVM8mp_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mp), /*IsRM=*/true);
+  case M68k::MOVM16mp_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mp), /*IsRM=*/true);
+  case M68k::MOVM32mp_P:
+    return TII->ExpandMOVEM(MIB, TII->get(M68k::MOVM32mp), /*IsRM=*/true);
+
+  case M68k::TCRETURNq:
+  case M68k::TCRETURNj: {
+    MachineOperand &JumpTarget = MI.getOperand(0);
+    MachineOperand &StackAdjust = MI.getOperand(1);
+    assert(StackAdjust.isImm() && "Expecting immediate value.");
+
+    // Adjust stack pointer.
+    int StackAdj = StackAdjust.getImm();
+    int MaxTCDelta = MFI->getTCReturnAddrDelta();
+    int Offset = 0;
+    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
+
+    // Incorporate the retaddr area.
+    Offset = StackAdj - MaxTCDelta;
+    assert(Offset >= 0 && "Offset should never be negative");
+
+    if (Offset) {
+      // Check for possible merge with preceding ADD instruction.
+      Offset += FL->mergeSPUpdates(MBB, MBBI, true);
+      FL->emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+    }
+
+    // Jump to label or value in register.
+    if (Opcode == M68k::TCRETURNq) {
+      MachineInstrBuilder MIB =
+          BuildMI(MBB, MBBI, DL, TII->get(M68k::TAILJMPq));
+      if (JumpTarget.isGlobal()) {
+        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+                             JumpTarget.getTargetFlags());
+      } else {
+        assert(JumpTarget.isSymbol());
+        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+                              JumpTarget.getTargetFlags());
+      }
+    } else {
+      BuildMI(MBB, MBBI, DL, TII->get(M68k::TAILJMPj))
+          .addReg(JumpTarget.getReg(), RegState::Kill);
+    }
+
+    MachineInstr &NewMI = *std::prev(MBBI);
+    NewMI.copyImplicitOps(*MBBI->getParent()->getParent(), *MBBI);
+
+    // Delete the pseudo instruction TCRETURN.
+    MBB.erase(MBBI);
+
+    return true;
+  }
+  case M68k::RET: {
+    // Adjust stack to erase error code
+    int64_t StackAdj = MBBI->getOperand(0).getImm();
+    MachineInstrBuilder MIB;
+
+    if (StackAdj == 0) {
+      MIB = BuildMI(MBB, MBBI, DL, TII->get(M68k::RTS));
+    } else if (isUInt<16>(StackAdj)) {
+
+      if (STI->atLeastM68020()) {
+        llvm_unreachable("RTD is not implemented");
+      } else {
+        // Copy PC from stack to a free address(A0 or A1) register
+        // TODO check if pseudo expand uses free address register
+        BuildMI(MBB, MBBI, DL, TII->get(M68k::MOV32aj), M68k::A1)
+            .addReg(M68k::SP);
+
+        // Adjust SP
+        FL->emitSPUpdate(MBB, MBBI, StackAdj, /*InEpilogue=*/true);
+
+        // Put the return address on stack
+        BuildMI(MBB, MBBI, DL, TII->get(M68k::MOV32ja))
+            .addReg(M68k::SP)
+            .addReg(M68k::A1);
+
+        // RTS
+        BuildMI(MBB, MBBI, DL, TII->get(M68k::RTS));
+      }
+    } else {
+      // TODO: RTD can only handle immediates as big as 2**16-1.
+      // If we need to pop off bytes before the return address, we
+      // must do it manually.
+      llvm_unreachable("Stack adjustment size not supported");
+    }
+
+    // FIXME: Can rest of the operands be ignored, if there is any?
+    MBB.erase(MBBI);
+    return true;
+  }
+  }
+  llvm_unreachable("Previous switch has a fallthrough?");
+}
+
+/// Expand all pseudo instructions contained in \p MBB.
+/// \returns true if any expansion occurred for \p MBB.
+bool M68kExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
+  bool Modified = false;
+
+  // MBBI may be invalidated by the expansion.
+  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
+  while (MBBI != E) {
+    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+    Modified |= ExpandMI(MBB, MBBI);
+    MBBI = NMBBI;
+  }
+
+  return Modified;
+}
+
+bool M68kExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
+  STI = &static_cast<const M68kSubtarget &>(MF.getSubtarget());
+  TII = STI->getInstrInfo();
+  TRI = STI->getRegisterInfo();
+  MFI = MF.getInfo<M68kMachineFunctionInfo>();
+  FL = STI->getFrameLowering();
+
+  bool Modified = false;
+  for (MachineBasicBlock &MBB : MF)
+    Modified |= ExpandMBB(MBB);
+  return Modified;
+}
+
+/// Returns an instance of the pseudo instruction expansion pass.
+FunctionPass *llvm::createM68kExpandPseudoPass() {
+  return new M68kExpandPseudo();
+}

diff  --git a/llvm/lib/Target/M68k/M68kFrameLowering.cpp b/llvm/lib/Target/M68k/M68kFrameLowering.cpp
new file mode 100644
index 000000000000..884ddca2389e
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kFrameLowering.cpp
@@ -0,0 +1,896 @@
+//===-- M68kFrameLowering.cpp - M68k Frame Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k implementation of TargetFrameLowering class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kFrameLowering.h"
+
+#include "M68kInstrBuilder.h"
+#include "M68kInstrInfo.h"
+#include "M68kMachineFunction.h"
+#include "M68kSubtarget.h"
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+/// Construct frame lowering for the given subtarget. The stack grows down
+/// and the local area starts at offset -4 from the entry SP (the
+/// TargetFrameLowering LocalAreaOffset argument).
+M68kFrameLowering::M68kFrameLowering(const M68kSubtarget &STI, Align Alignment)
+    : TargetFrameLowering(StackGrowsDown, Alignment, -4), STI(STI),
+      TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
+  // Cache frequently-used subtarget facts: stack slot size in bytes and the
+  // physical stack pointer register.
+  SlotSize = STI.getSlotSize();
+  StackPtr = TRI->getStackRegister();
+}
+
+/// Return true if this function must materialize a frame pointer: FP
+/// elimination disabled, variable-sized frame objects, an escaping frame
+/// address, or a stack that needs dynamic realignment.
+bool M68kFrameLowering::hasFP(const MachineFunction &MF) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *RegInfo = STI.getRegisterInfo();
+
+  if (MF.getTarget().Options.DisableFramePointerElim(MF))
+    return true;
+  if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken())
+    return true;
+  return RegInfo->needsStackRealignment(MF);
+}
+
+// FIXME Make sure no other factors prevent us from reserving call frame
+bool M68kFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+  // The call frame can only be reserved when the stack layout is static:
+  // no variable-sized objects and no argument push sequences.
+  if (MF.getFrameInfo().hasVarSizedObjects())
+    return false;
+  return !MF.getInfo<M68kMachineFunctionInfo>()->getHasPushSequences();
+}
+
+/// Call-frame pseudos can be simplified when some register gives a stable
+/// view of the frame throughout the function.
+bool M68kFrameLowering::canSimplifyCallFramePseudos(
+    const MachineFunction &MF) const {
+  // Trivially simplifiable when the call frame is reserved up front.
+  if (hasReservedCallFrame(MF))
+    return true;
+  // A frame pointer also works, unless the stack must be realigned...
+  if (hasFP(MF) && !TRI->needsStackRealignment(MF))
+    return true;
+  // ...in which case a dedicated base pointer is required.
+  return TRI->hasBasePointer(MF);
+}
+
+/// Frame indices must be resolved whenever there are stack objects, or when
+/// push sequences were emitted for outgoing call arguments.
+bool M68kFrameLowering::needsFrameIndexResolution(
+    const MachineFunction &MF) const {
+  if (MF.getInfo<M68kMachineFunctionInfo>()->getHasPushSequences())
+    return true;
+  return MF.getFrameInfo().hasStackObjects();
+}
+
+// NOTE: this only has a subset of the full frame index logic. In
+// particular, the FI < 0 and AfterFPPop logic is handled in
+// M68kRegisterInfo::eliminateFrameIndex, but not here. Possibly
+// (probably?) it should be moved into here.
+/// Determine how to address frame index \p FI: select the base register
+/// (base pointer, stack pointer, or frame pointer) into \p FrameReg and
+/// return the fixed byte displacement from it.
+StackOffset
+M68kFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+                                          Register &FrameReg) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // We can't calculate offset from frame pointer if the stack is realigned,
+  // so enforce usage of stack/base pointer.  The base pointer is used when we
+  // have dynamic allocas in addition to dynamic realignment.
+  if (TRI->hasBasePointer(MF))
+    FrameReg = TRI->getBaseRegister();
+  else if (TRI->needsStackRealignment(MF))
+    FrameReg = TRI->getStackRegister();
+  else
+    FrameReg = TRI->getFrameRegister(MF);
+
+  // Offset will hold the offset from the stack pointer at function entry to the
+  // object.
+  // We need to factor in additional offsets applied during the prologue to the
+  // frame, base, and stack pointer depending on which is used.
+  int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
+  const M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  uint64_t StackSize = MFI.getStackSize();
+  bool HasFP = hasFP(MF);
+
+  // TODO: Support tail calls
+  if (TRI->hasBasePointer(MF)) {
+    assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
+    if (FI < 0) {
+      // Skip the saved FP.
+      return StackOffset::getFixed(Offset + SlotSize);
+    }
+
+    assert((-(Offset + StackSize)) % MFI.getObjectAlign(FI).value() == 0);
+    return StackOffset::getFixed(Offset + StackSize);
+  }
+  // NOTE(review): this branch computes the same offsets as the base-pointer
+  // case above; only the FrameReg selected earlier differs.
+  if (TRI->needsStackRealignment(MF)) {
+    if (FI < 0) {
+      // Skip the saved FP.
+      return StackOffset::getFixed(Offset + SlotSize);
+    }
+
+    assert((-(Offset + StackSize)) % MFI.getObjectAlign(FI).value() == 0);
+    return StackOffset::getFixed(Offset + StackSize);
+  }
+
+  // No FP and no realignment: address relative to the stack pointer.
+  if (!HasFP)
+    return StackOffset::getFixed(Offset + StackSize);
+
+  // Skip the saved FP.
+  Offset += SlotSize;
+
+  // Skip the RETADDR move area
+  int TailCallReturnAddrDelta = MMFI->getTCReturnAddrDelta();
+  if (TailCallReturnAddrDelta < 0)
+    Offset -= TailCallReturnAddrDelta;
+
+  return StackOffset::getFixed(Offset);
+}
+
+/// Return a caller-saved register that isn't live
+/// when it reaches the "return" instruction. We can then pop a stack object
+/// to this register without worry about clobbering it.
+/// \returns the chosen register, or 0 if none is available.
+static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
+                                       MachineBasicBlock::iterator &MBBI,
+                                       const M68kRegisterInfo *TRI) {
+  const MachineFunction *MF = MBB.getParent();
+  // Bail out for EH returns; no register is considered safe there.
+  if (MF->callsEHReturn())
+    return 0;
+
+  const TargetRegisterClass &AvailableRegs = *TRI->getRegsForTailCall(*MF);
+
+  if (MBBI == MBB.end())
+    return 0;
+
+  switch (MBBI->getOpcode()) {
+  default:
+    return 0;
+  case TargetOpcode::PATCHABLE_RET:
+  case M68k::RET: {
+    SmallSet<uint16_t, 8> Uses;
+
+    // Collect every register the return reads, together with all aliases.
+    for (MachineOperand &MO : MBBI->operands()) {
+      if (!MO.isReg() || MO.isDef())
+        continue;
+      Register Reg = MO.getReg();
+      if (!Reg)
+        continue;
+      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
+        Uses.insert(*AI);
+    }
+
+    // Pick the first candidate register the return does not use.
+    for (auto CS : AvailableRegs)
+      if (!Uses.count(CS))
+        return CS;
+  }
+  }
+
+  return 0;
+}
+
+/// Return true if \p Reg appears in \p MBB's live-in list.
+static bool isRegLiveIn(MachineBasicBlock &MBB, unsigned Reg) {
+  for (const MachineBasicBlock::RegisterMaskPair &LI : MBB.liveins())
+    if (LI.PhysReg == Reg)
+      return true;
+  return false;
+}
+
+/// Compute the largest alignment the frame requires: the frame info's own
+/// maximum alignment, possibly raised for "stackrealign" functions.
+uint64_t
+M68kFrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  uint64_t MaxAlign = MFI.getMaxAlign().value();
+  unsigned StackAlign = getStackAlignment(); // ABI-mandated alignment.
+
+  if (MF.getFunction().hasFnAttribute("stackrealign")) {
+    if (MFI.hasCalls()) {
+      // Functions with calls must honor at least the ABI alignment.
+      if (StackAlign > MaxAlign)
+        MaxAlign = StackAlign;
+    } else if (MaxAlign < SlotSize) {
+      // Otherwise make sure we have some alignment: at least one slot.
+      MaxAlign = SlotSize;
+    }
+  }
+  return MaxAlign;
+}
+
+/// Emit code realizing "Reg = Reg & -MaxAlign" before \p MBBI, used to align
+/// the stack pointer.
+/// NOTE(review): D0 is clobbered unconditionally here; callers (prologue
+/// emission) are presumed to tolerate that -- confirm for all call sites.
+void M68kFrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MBBI,
+                                           const DebugLoc &DL, unsigned Reg,
+                                           uint64_t MaxAlign) const {
+  // -MaxAlign is the mask that clears the low log2(MaxAlign) bits.
+  uint64_t Val = -MaxAlign;
+  unsigned AndOp = M68k::AND32di;
+  unsigned MovOp = M68k::MOV32rr;
+
+  // This function is normally used with SP which is Address Register, but AND,
+  // or any other logical instructions in M68k do not support ARs so we need
+  // to use a temp Data Register to perform the op.
+  unsigned Tmp = M68k::D0;
+
+  // Tmp <- Reg
+  BuildMI(MBB, MBBI, DL, TII.get(MovOp), Tmp)
+      .addReg(Reg)
+      .setMIFlag(MachineInstr::FrameSetup);
+
+  // Tmp <- Tmp & -MaxAlign
+  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Tmp)
+                         .addReg(Tmp)
+                         .addImm(Val)
+                         .setMIFlag(MachineInstr::FrameSetup);
+
+  // The CCR implicit def is dead.
+  MI->getOperand(3).setIsDead();
+
+  // Reg <- Tmp (aligned value back into the original register).
+  BuildMI(MBB, MBBI, DL, TII.get(MovOp), Reg)
+      .addReg(Tmp)
+      .setMIFlag(MachineInstr::FrameSetup);
+}
+
+/// Lower the call-frame setup/destroy pseudo at \p I into explicit SP
+/// arithmetic (when the call frame is not reserved) plus any CFI needed for
+/// unwinding, or into a compensating adjustment after callee-pop calls.
+/// \returns an iterator to the instruction following the erased pseudo.
+MachineBasicBlock::iterator M68kFrameLowering::eliminateCallFramePseudoInstr(
+    MachineFunction &MF, MachineBasicBlock &MBB,
+    MachineBasicBlock::iterator I) const {
+  bool ReserveCallFrame = hasReservedCallFrame(MF);
+  unsigned Opcode = I->getOpcode();
+  bool IsDestroy = Opcode == TII.getCallFrameDestroyOpcode();
+  DebugLoc DL = I->getDebugLoc();
+  // Operand 0 is the total adjustment; operand 1 the callee-popped amount.
+  uint64_t Amount = !ReserveCallFrame ? I->getOperand(0).getImm() : 0;
+  uint64_t InternalAmt = (IsDestroy && Amount) ? I->getOperand(1).getImm() : 0;
+  I = MBB.erase(I);
+
+  if (!ReserveCallFrame) {
+    // If the stack pointer can be changed after prologue, turn the
+    // adjcallstackup instruction into a 'sub %SP, <amt>' and the
+    // adjcallstackdown instruction into 'add %SP, <amt>'
+
+    // We need to keep the stack aligned properly.  To do this, we round the
+    // amount of space needed for the outgoing arguments up to the next
+    // alignment boundary.
+    unsigned StackAlign = getStackAlignment();
+    Amount = alignTo(Amount, StackAlign);
+
+    MachineModuleInfo &MMI = MF.getMMI();
+    const auto &Fn = MF.getFunction();
+    bool DwarfCFI = MMI.hasDebugInfo() || Fn.needsUnwindTableEntry();
+
+    // If we have any exception handlers in this function, and we adjust
+    // the SP before calls, we may need to indicate this to the unwinder
+    // using GNU_ARGS_SIZE. Note that this may be necessary even when
+    // Amount == 0, because the preceding function may have set a non-0
+    // GNU_ARGS_SIZE.
+    // TODO: We don't need to reset this between subsequent functions,
+    // if it didn't change.
+    bool HasDwarfEHHandlers = !MF.getLandingPads().empty();
+
+    if (HasDwarfEHHandlers && !IsDestroy &&
+        MF.getInfo<M68kMachineFunctionInfo>()->getHasPushSequences()) {
+      BuildCFI(MBB, I, DL,
+               MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
+    }
+
+    if (Amount == 0)
+      return I;
+
+    // Factor out the amount that gets handled inside the sequence
+    // (Pushes of argument for frame setup, callee pops for frame destroy)
+    Amount -= InternalAmt;
+
+    // TODO: This is needed only if we require precise CFA.
+    // If this is a callee-pop calling convention, emit a CFA adjust for
+    // the amount the callee popped.
+    if (IsDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
+      BuildCFI(MBB, I, DL,
+               MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
+
+    // Add Amount to SP to destroy a frame, or subtract to setup.
+    int64_t StackAdjustment = IsDestroy ? Amount : -Amount;
+    int64_t CfaAdjustment = -StackAdjustment;
+
+    if (StackAdjustment) {
+      // Merge with any previous or following adjustment instruction. Note: the
+      // instructions merged with here do not have CFI, so their stack
+      // adjustments do not feed into CfaAdjustment.
+      StackAdjustment += mergeSPUpdates(MBB, I, true);
+      StackAdjustment += mergeSPUpdates(MBB, I, false);
+
+      if (StackAdjustment) {
+        BuildStackAdjustment(MBB, I, DL, StackAdjustment, false);
+      }
+    }
+
+    if (DwarfCFI && !hasFP(MF)) {
+      // If we don't have FP, but need to generate unwind information,
+      // we need to set the correct CFA offset after the stack adjustment.
+      // How much we adjust the CFA offset depends on whether we're emitting
+      // CFI only for EH purposes or for debugging. EH only requires the CFA
+      // offset to be correct at each call site, while for debugging we want
+      // it to be more precise.
+
+      // TODO: When not using precise CFA, we also need to adjust for the
+      // InternalAmt here.
+      if (CfaAdjustment) {
+        BuildCFI(
+            MBB, I, DL,
+            MCCFIInstruction::createAdjustCfaOffset(nullptr, CfaAdjustment));
+      }
+    }
+
+    return I;
+  }
+
+  if (IsDestroy && InternalAmt) {
+    // If we are performing frame pointer elimination and if the callee pops
+    // something off the stack pointer, add it back.  We do this until we have
+    // more advanced stack pointer tracking ability.
+    // We are not tracking the stack pointer adjustment by the callee, so make
+    // sure we restore the stack pointer immediately after the call, there may
+    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+    MachineBasicBlock::iterator CI = I;
+    MachineBasicBlock::iterator B = MBB.begin();
+    while (CI != B && !std::prev(CI)->isCall())
+      --CI;
+    BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
+  }
+
+  return I;
+}
+
+/// Emit a series of instructions to increment / decrement the stack pointer by
+/// a constant value.
+/// \param NumBytes   signed adjustment; negative values shrink the stack.
+/// \param InEpilogue forwarded to BuildStackAdjustment.
+void M68kFrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator &MBBI,
+                                     int64_t NumBytes, bool InEpilogue) const {
+  bool IsSub = NumBytes < 0;
+  uint64_t Offset = IsSub ? -NumBytes : NumBytes;
+
+  // Largest amount handled by a single immediate adjustment (2^31 - 1).
+  uint64_t Chunk = (1LL << 31) - 1;
+  DebugLoc DL = MBB.findDebugLoc(MBBI);
+
+  while (Offset) {
+    if (Offset > Chunk) {
+      // Rather than emit a long series of instructions for large offsets,
+      // load the offset into a register and do one sub/add
+      Register Reg;
+
+      if (IsSub && !isRegLiveIn(MBB, M68k::D0))
+        Reg = M68k::D0;
+      else
+        Reg = findDeadCallerSavedReg(MBB, MBBI, TRI);
+
+      if (Reg) {
+        unsigned Opc = M68k::MOV32ri;
+        BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg).addImm(Offset);
+        Opc = IsSub ? M68k::SUB32rr : M68k::ADD32rr;
+        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+                               .addReg(StackPtr)
+                               .addReg(Reg);
+        // ??? still no CCR
+        MI->getOperand(3).setIsDead(); // The CCR implicit def is dead.
+        Offset = 0;
+        continue;
+      }
+      // No scratch register found: fall through and emit chunked immediates.
+    }
+
+    uint64_t ThisVal = std::min(Offset, Chunk);
+
+    MachineInstrBuilder MI = BuildStackAdjustment(
+        MBB, MBBI, DL, IsSub ? -ThisVal : ThisVal, InEpilogue);
+    // NOTE(review): the MI flag is keyed off the adjustment direction
+    // (FrameSetup for subtract, FrameDestroy for add) rather than off
+    // InEpilogue -- confirm this is intended.
+    if (IsSub)
+      MI.setMIFlag(MachineInstr::FrameSetup);
+    else
+      MI.setMIFlag(MachineInstr::FrameDestroy);
+
+    Offset -= ThisVal;
+  }
+}
+
+/// If the instruction before \p MBBI (\p MergeWithPrevious == true) or at
+/// \p MBBI (== false) is an ADD32ri/SUB32ri on the stack pointer, erase it
+/// and return its net adjustment: positive for ADD, negative for SUB.
+/// Returns 0 when nothing could be merged.
+int M68kFrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
+                                      MachineBasicBlock::iterator &MBBI,
+                                      bool MergeWithPrevious) const {
+  if ((MergeWithPrevious && MBBI == MBB.begin()) ||
+      (!MergeWithPrevious && MBBI == MBB.end()))
+    return 0;
+
+  // PI is the instruction inspected; NI is only meaningful (and only read)
+  // in the !MergeWithPrevious case, where it becomes the new MBBI after
+  // erasing PI. NOTE(review): the nullptr-constructed iterator in the other
+  // arm is never dereferenced.
+  MachineBasicBlock::iterator PI = MergeWithPrevious ? std::prev(MBBI) : MBBI;
+  MachineBasicBlock::iterator NI =
+      MergeWithPrevious ? nullptr : std::next(MBBI);
+  unsigned Opc = PI->getOpcode();
+  int Offset = 0;
+
+  if (!MergeWithPrevious && NI != MBB.end() &&
+      NI->getOpcode() == TargetOpcode::CFI_INSTRUCTION) {
+    // Don't merge with the next instruction if it has CFI.
+    return Offset;
+  }
+
+  if (Opc == M68k::ADD32ri && PI->getOperand(0).getReg() == StackPtr) {
+    assert(PI->getOperand(1).getReg() == StackPtr);
+    Offset += PI->getOperand(2).getImm();
+    MBB.erase(PI);
+    if (!MergeWithPrevious)
+      MBBI = NI;
+  } else if (Opc == M68k::SUB32ri && PI->getOperand(0).getReg() == StackPtr) {
+    assert(PI->getOperand(1).getReg() == StackPtr);
+    Offset -= PI->getOperand(2).getImm();
+    MBB.erase(PI);
+    if (!MergeWithPrevious)
+      MBBI = NI;
+  }
+
+  return Offset;
+}
+
+/// Emit a single ADD32ri/SUB32ri adjusting the stack pointer by \p Offset
+/// and return the builder for the emitted instruction.
+/// NOTE(review): \p InEpilogue is currently unused here -- the same opcodes
+/// are emitted in both directions; confirm that is intended.
+MachineInstrBuilder M68kFrameLowering::BuildStackAdjustment(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
+  assert(Offset != 0 && "zero offset stack adjustment requested");
+
+  // TODO can `lea` be used to adjust stack?
+
+  bool IsSub = Offset < 0;
+  uint64_t AbsOffset = IsSub ? -Offset : Offset;
+  unsigned Opc = IsSub ? M68k::SUB32ri : M68k::ADD32ri;
+
+  MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+                               .addReg(StackPtr)
+                               .addImm(AbsOffset);
+  // FIXME Update CCR as well. For now we just
+  // conservatively say CCR implicit def is dead
+  MI->getOperand(3).setIsDead();
+  return MI;
+}
+
+/// Register \p CFIInst with the function and insert a CFI_INSTRUCTION
+/// referencing it before \p MBBI.
+void M68kFrameLowering::BuildCFI(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MBBI,
+                                 const DebugLoc &DL,
+                                 const MCCFIInstruction &CFIInst) const {
+  MachineFunction &MF = *MBB.getParent();
+  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+      .addCFIIndex(MF.addFrameInst(CFIInst));
+}
+
+/// Emit one DWARF offset record per callee-saved register, describing where
+/// each register was spilled relative to the CFA.
+void M68kFrameLowering::emitCalleeSavedFrameMoves(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+    const DebugLoc &DL) const {
+  MachineFunction &MF = *MBB.getParent();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // Nothing to describe if no registers were saved.
+  const auto &CSI = MFI.getCalleeSavedInfo();
+  if (CSI.empty())
+    return;
+
+  MachineModuleInfo &MMI = MF.getMMI();
+  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+
+  for (const auto &Info : CSI) {
+    unsigned DwarfReg = MRI->getDwarfRegNum(Info.getReg(), true);
+    BuildCFI(MBB, MBBI, DL,
+             MCCFIInstruction::createOffset(
+                 nullptr, DwarfReg, MFI.getObjectOffset(Info.getFrameIdx())));
+  }
+}
+
+/// Insert prologue code into the entry block: optional RETADDR-area
+/// adjustment for tail calls, FP save and establishment (with CFI),
+/// optional stack realignment, the main SP decrement, and base-pointer
+/// setup for functions with dynamic allocas.
+void M68kFrameLowering::emitPrologue(MachineFunction &MF,
+                                     MachineBasicBlock &MBB) const {
+  assert(&STI == &MF.getSubtarget<M68kSubtarget>() &&
+         "MF used frame lowering for wrong subtarget");
+
+  MachineBasicBlock::iterator MBBI = MBB.begin();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const auto &Fn = MF.getFunction();
+  MachineModuleInfo &MMI = MF.getMMI();
+  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
+  uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
+  bool HasFP = hasFP(MF);
+  bool NeedsDwarfCFI = MMI.hasDebugInfo() || Fn.needsUnwindTableEntry();
+  unsigned FramePtr = TRI->getFrameRegister(MF);
+  const unsigned MachineFramePtr = FramePtr;
+  unsigned BasePtr = TRI->getBaseRegister();
+
+  // Debug location must be unknown since the first debug location is used
+  // to determine the end of the prologue.
+  DebugLoc DL;
+
+  // Add RETADDR move area to callee saved frame size.
+  int TailCallReturnAddrDelta = MMFI->getTCReturnAddrDelta();
+
+  if (TailCallReturnAddrDelta < 0) {
+    MMFI->setCalleeSavedFrameSize(MMFI->getCalleeSavedFrameSize() -
+                                  TailCallReturnAddrDelta);
+  }
+
+  // Insert stack pointer adjustment for later moving of return addr.  Only
+  // applies to tail call optimized functions where the callee argument stack
+  // size is bigger than the callers.
+  if (TailCallReturnAddrDelta < 0) {
+    BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
+                         /*InEpilogue=*/false)
+        .setMIFlag(MachineInstr::FrameSetup);
+  }
+
+  // Mapping for machine moves:
+  //
+  //   DST: VirtualFP AND
+  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
+  //        ELSE                        => DW_CFA_def_cfa
+  //
+  //   SRC: VirtualFP AND
+  //        DST: Register               => DW_CFA_def_cfa_register
+  //
+  //   ELSE
+  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
+  //        REG < 64                    => DW_CFA_offset + Reg
+  //        ELSE                        => DW_CFA_offset_extended
+
+  // NumBytes: amount left to subtract from SP after FP/CSR saves.
+  uint64_t NumBytes = 0;
+  int stackGrowth = -SlotSize;
+
+  if (HasFP) {
+    // Calculate required stack adjustment.
+    uint64_t FrameSize = StackSize - SlotSize;
+    // If required, include space for extra hidden slot for stashing base
+    // pointer.
+    if (MMFI->getRestoreBasePointer())
+      FrameSize += SlotSize;
+
+    NumBytes = FrameSize - MMFI->getCalleeSavedFrameSize();
+
+    // Callee-saved registers are pushed on stack before the stack is realigned.
+    if (TRI->needsStackRealignment(MF))
+      NumBytes = alignTo(NumBytes, MaxAlign);
+
+    // Get the offset of the stack slot for the FP register, which is
+    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
+    // Update the frame offset adjustment.
+    MFI.setOffsetAdjustment(-NumBytes);
+
+    // Save FP into the appropriate stack slot.
+    BuildMI(MBB, MBBI, DL, TII.get(M68k::PUSH32r))
+        .addReg(MachineFramePtr, RegState::Kill)
+        .setMIFlag(MachineInstr::FrameSetup);
+
+    if (NeedsDwarfCFI) {
+      // Mark the place where FP was saved.
+      // Define the current CFA rule to use the provided offset.
+      assert(StackSize);
+      BuildCFI(MBB, MBBI, DL,
+               MCCFIInstruction::cfiDefCfaOffset(nullptr, 2 * stackGrowth));
+
+      // Change the rule for the FramePtr to be an "offset" rule.
+      int DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
+      assert(DwarfFramePtr > 0);
+      BuildCFI(MBB, MBBI, DL,
+               MCCFIInstruction::createOffset(nullptr, DwarfFramePtr,
+                                              2 * stackGrowth));
+    }
+
+    // Update FP with the new base value.
+    BuildMI(MBB, MBBI, DL, TII.get(M68k::MOV32aa), FramePtr)
+        .addReg(StackPtr)
+        .setMIFlag(MachineInstr::FrameSetup);
+
+    if (NeedsDwarfCFI) {
+      // Mark effective beginning of when frame pointer becomes valid.
+      // Define the current CFA to use the FP register.
+      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
+      BuildCFI(MBB, MBBI, DL,
+               MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
+    }
+
+    // Mark the FramePtr as live-in in every block. Don't do this again for
+    // funclet prologues.
+    for (MachineBasicBlock &EveryMBB : MF)
+      EveryMBB.addLiveIn(MachineFramePtr);
+  } else {
+    NumBytes = StackSize - MMFI->getCalleeSavedFrameSize();
+  }
+
+  // Skip the callee-saved push instructions.
+  bool PushedRegs = false;
+  int StackOffset = 2 * stackGrowth;
+
+  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) &&
+         MBBI->getOpcode() == M68k::PUSH32r) {
+    PushedRegs = true;
+    ++MBBI;
+
+    if (!HasFP && NeedsDwarfCFI) {
+      // Mark callee-saved push instruction.
+      // Define the current CFA rule to use the provided offset.
+      assert(StackSize);
+      BuildCFI(MBB, MBBI, DL,
+               MCCFIInstruction::cfiDefCfaOffset(nullptr, StackOffset));
+      StackOffset += stackGrowth;
+    }
+  }
+
+  // Realign stack after we pushed callee-saved registers (so that we'll be
+  // able to calculate their offsets from the frame pointer).
+  if (TRI->needsStackRealignment(MF)) {
+    assert(HasFP && "There should be a frame pointer if stack is realigned.");
+    BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
+  }
+
+  // If there is an SUB32ri of SP immediately before this instruction, merge
+  // the two. This can be the case when tail call elimination is enabled and
+  // the callee has more arguments then the caller.
+  NumBytes -= mergeSPUpdates(MBB, MBBI, true);
+
+  // Adjust stack pointer: ESP -= numbytes.
+  emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
+
+  unsigned SPOrEstablisher = StackPtr;
+
+  // If we need a base pointer, set it up here. It's whatever the value
+  // of the stack pointer is at this point. Any variable size objects
+  // will be allocated after this, so we can still use the base pointer
+  // to reference locals.
+  if (TRI->hasBasePointer(MF)) {
+    // Update the base pointer with the current stack pointer.
+    BuildMI(MBB, MBBI, DL, TII.get(M68k::MOV32aa), BasePtr)
+        .addReg(SPOrEstablisher)
+        .setMIFlag(MachineInstr::FrameSetup);
+    if (MMFI->getRestoreBasePointer()) {
+      // Stash value of base pointer.  Saving SP instead of FP shortens
+      // dependence chain. Used by SjLj EH.
+      unsigned Opm = M68k::MOV32ja;
+      M68k::addRegIndirectWithDisp(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
+                                   FramePtr, true,
+                                   MMFI->getRestoreBasePointerOffset())
+          .addReg(SPOrEstablisher)
+          .setMIFlag(MachineInstr::FrameSetup);
+    }
+  }
+
+  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
+    // Mark end of stack pointer adjustment.
+    if (!HasFP && NumBytes) {
+      // Define the current CFA rule to use the provided offset.
+      assert(StackSize);
+      BuildCFI(
+          MBB, MBBI, DL,
+          MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackSize + stackGrowth));
+    }
+
+    // Emit DWARF info specifying the offsets of the callee-saved registers.
+    if (PushedRegs)
+      emitCalleeSavedFrameMoves(MBB, MBBI, DL);
+  }
+
+  // TODO Interrupt handlers
+  // M68k Interrupt handling function cannot assume anything about the
+  // direction flag (DF in CCR register). Clear this flag by creating "cld"
+  // instruction in each prologue of interrupt handler function. The "cld"
+  // instruction should only in these cases:
+  // 1. The interrupt handling function uses any of the "rep" instructions.
+  // 2. Interrupt handling function calls another function.
+}
+
+/// Check whether \p Opc is one of the tail-call return pseudos.
+static bool isTailCallOpcode(unsigned Opc) {
+  switch (Opc) {
+  case M68k::TCRETURNj:
+  case M68k::TCRETURNq:
+    return true;
+  default:
+    return false;
+  }
+}
+
+/// Insert epilogue code before the return: FP restore, undoing of the stack
+/// allocation (via LEA/MOV off FP or an SP adjustment), and restoration of
+/// any RETADDR-area delta when not tail calling.
+void M68kFrameLowering::emitEpilogue(MachineFunction &MF,
+                                     MachineBasicBlock &MBB) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
+  Optional<unsigned> RetOpcode;
+  if (MBBI != MBB.end())
+    RetOpcode = MBBI->getOpcode();
+  DebugLoc DL;
+  if (MBBI != MBB.end())
+    DL = MBBI->getDebugLoc();
+  unsigned FramePtr = TRI->getFrameRegister(MF);
+  unsigned MachineFramePtr = FramePtr;
+
+  // Get the number of bytes to allocate from the FrameInfo.
+  uint64_t StackSize = MFI.getStackSize();
+  uint64_t MaxAlign = calculateMaxStackAlign(MF);
+  unsigned CSSize = MMFI->getCalleeSavedFrameSize();
+  uint64_t NumBytes = 0;
+
+  if (hasFP(MF)) {
+    // Calculate required stack adjustment.
+    uint64_t FrameSize = StackSize - SlotSize;
+    NumBytes = FrameSize - CSSize;
+
+    // Callee-saved registers were pushed on stack before the stack was
+    // realigned.
+    if (TRI->needsStackRealignment(MF))
+      NumBytes = alignTo(FrameSize, MaxAlign);
+
+    // Pop FP.
+    BuildMI(MBB, MBBI, DL, TII.get(M68k::POP32r), MachineFramePtr)
+        .setMIFlag(MachineInstr::FrameDestroy);
+  } else {
+    NumBytes = StackSize - CSSize;
+  }
+
+  // Skip the callee-saved pop instructions.
+  while (MBBI != MBB.begin()) {
+    MachineBasicBlock::iterator PI = std::prev(MBBI);
+    unsigned Opc = PI->getOpcode();
+
+    if ((Opc != M68k::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
+        Opc != M68k::DBG_VALUE && !PI->isTerminator())
+      break;
+
+    --MBBI;
+  }
+  MachineBasicBlock::iterator FirstCSPop = MBBI;
+
+  if (MBBI != MBB.end())
+    DL = MBBI->getDebugLoc();
+
+  // If there is an ADD32ri or SUB32ri of SP immediately before this
+  // instruction, merge the two instructions.
+  if (NumBytes || MFI.hasVarSizedObjects())
+    NumBytes += mergeSPUpdates(MBB, MBBI, true);
+
+  // If dynamic alloca is used, then reset SP to point to the last callee-saved
+  // slot before popping them off! Same applies for the case, when stack was
+  // realigned. Don't do this if this was a funclet epilogue, since the funclets
+  // will not do realignment or dynamic stack allocation.
+  if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects())) {
+    if (TRI->needsStackRealignment(MF))
+      MBBI = FirstCSPop;
+    // NOTE(review): CSSize is unsigned, so -CSSize wraps to a large 32-bit
+    // value before widening to uint64_t; the displacement only comes out
+    // negative again via narrowing at the use below. Consider int64_t --
+    // TODO confirm against addRegIndirectWithDisp's parameter type.
+    uint64_t LEAAmount = -CSSize;
+
+    // 'move %FramePtr, SP' will not be recognized as an epilogue sequence.
+    // However, we may use this sequence if we have a frame pointer because the
+    // effects of the prologue can safely be undone.
+    if (LEAAmount != 0) {
+      unsigned Opc = M68k::LEA32p;
+      M68k::addRegIndirectWithDisp(
+          BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), FramePtr, false,
+          LEAAmount);
+      --MBBI;
+    } else {
+      unsigned Opc = (M68k::MOV32rr);
+      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr).addReg(FramePtr);
+      --MBBI;
+    }
+  } else if (NumBytes) {
+    // Adjust stack pointer back: SP += numbytes.
+    emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
+    --MBBI;
+  }
+
+  if (!RetOpcode || !isTailCallOpcode(*RetOpcode)) {
+    // Add the return addr area delta back since we are not tail calling.
+    int Offset = -1 * MMFI->getTCReturnAddrDelta();
+    assert(Offset >= 0 && "TCDelta should never be positive");
+    if (Offset) {
+      MBBI = MBB.getFirstTerminator();
+
+      // Check for possible merge with preceding ADD instruction.
+      Offset += mergeSPUpdates(MBB, MBBI, true);
+      emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
+    }
+  }
+}
+
+/// Augment the default callee-saved set: reserve the RETADDR move area for
+/// tail-call-optimized functions and make sure the base pointer is saved
+/// whenever it is in use.
+void M68kFrameLowering::determineCalleeSaves(MachineFunction &MF,
+                                             BitVector &SavedRegs,
+                                             RegScavenger *RS) const {
+  // Let the generic implementation mark the default set first.
+  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+
+  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
+  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
+
+  // A negative delta means a RETADDR area is needed below the incoming
+  // return address; reserve it as a fixed object:
+  //   arg
+  //   arg
+  //   RETADDR
+  //   { ...
+  //     RETADDR area
+  //     ...
+  //   }
+  //   [FP]
+  int64_t TCDelta = FuncInfo->getTCReturnAddrDelta();
+  if (TCDelta < 0)
+    FrameInfo.CreateFixedObject(-TCDelta, TCDelta - SlotSize, true);
+
+  // Spill the BasePtr if it's used.
+  if (TRI->hasBasePointer(MF))
+    SavedRegs.set(TRI->getBaseRegister());
+}
+
+/// Assign fixed spill slots: when a frame pointer exists, reserve the first
+/// slot for it and remove it from the CSI list (the prologue/epilogue handle
+/// it directly). Everything else keeps the default assignment.
+bool M68kFrameLowering::assignCalleeSavedSpillSlots(
+    MachineFunction &MF, const TargetRegisterInfo *TRI,
+    std::vector<CalleeSavedInfo> &CSI) const {
+  MachineFrameInfo &FrameInfo = MF.getFrameInfo();
+  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
+
+  int SpillSlotOffset =
+      getOffsetOfLocalArea() + FuncInfo->getTCReturnAddrDelta();
+
+  if (hasFP(MF)) {
+    // emitPrologue spills the frame register before anything else, so the
+    // very first fixed slot belongs to it.
+    SpillSlotOffset -= SlotSize;
+    FrameInfo.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+
+    // emitPrologue/emitEpilogue save and restore FP themselves; drop it from
+    // the CSI list so the generic spill code leaves it alone.
+    unsigned FPReg = TRI->getFrameRegister(MF);
+    for (auto It = CSI.begin(), End = CSI.end(); It != End; ++It) {
+      if (TRI->regsOverlap(It->getReg(), FPReg)) {
+        CSI.erase(It);
+        break;
+      }
+    }
+  }
+
+  // The rest is fine with the default slot assignment.
+  return false;
+}
+
+/// Spill all callee-saved registers with a single MOVM store: a bitmask of
+/// spill-order positions is built from the CSI list and one MOVM32pm is
+/// emitted against the highest frame index; each register is attached as an
+/// implicit operand (with a memory operand) so liveness stays accurate.
+/// \returns true to tell the generic spiller the work is done here.
+bool M68kFrameLowering::spillCalleeSavedRegisters(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+  auto &MRI = *static_cast<const M68kRegisterInfo *>(TRI);
+  auto DL = MBB.findDebugLoc(MI);
+
+  // Build the MOVM register mask; FI ends up as the largest frame index.
+  int FI = 0;
+  unsigned Mask = 0;
+  for (const auto &Info : CSI) {
+    FI = std::max(FI, Info.getFrameIdx());
+    unsigned Reg = Info.getReg();
+    unsigned Shift = MRI.getSpillRegisterOrder(Reg);
+    Mask |= 1 << Shift;
+  }
+
+  auto I =
+      M68k::addFrameReference(BuildMI(MBB, MI, DL, TII.get(M68k::MOVM32pm)), FI)
+          .addImm(Mask)
+          .setMIFlag(MachineInstr::FrameSetup);
+
+  // Append implicit registers and mem locations
+  const MachineFunction &MF = *MBB.getParent();
+  const MachineRegisterInfo &RI = MF.getRegInfo();
+  for (const auto &Info : CSI) {
+    unsigned Reg = Info.getReg();
+    bool IsLiveIn = RI.isLiveIn(Reg);
+    if (!IsLiveIn)
+      MBB.addLiveIn(Reg);
+    // Live-in registers stay live (Implicit); others are killed by the spill.
+    I.addReg(Reg, IsLiveIn ? RegState::Implicit : RegState::ImplicitKill);
+    M68k::addMemOperand(I, Info.getFrameIdx(), 0);
+  }
+
+  return true;
+}
+
+/// Restore all callee-saved registers with a single MOVM load (MOVM32mp),
+/// mirroring spillCalleeSavedRegisters: the same spill-order mask is built
+/// and each register is attached as an implicit def with a memory operand.
+/// \returns true to tell the generic restorer the work is done here.
+bool M68kFrameLowering::restoreCalleeSavedRegisters(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
+  auto &MRI = *static_cast<const M68kRegisterInfo *>(TRI);
+  auto DL = MBB.findDebugLoc(MI);
+
+  // Build the MOVM register mask; FI ends up as the largest frame index.
+  int FI = 0;
+  unsigned Mask = 0;
+  for (const auto &Info : CSI) {
+    FI = std::max(FI, Info.getFrameIdx());
+    unsigned Reg = Info.getReg();
+    unsigned Shift = MRI.getSpillRegisterOrder(Reg);
+    Mask |= 1 << Shift;
+  }
+
+  auto I = M68k::addFrameReference(
+               BuildMI(MBB, MI, DL, TII.get(M68k::MOVM32mp)).addImm(Mask), FI)
+               .setMIFlag(MachineInstr::FrameDestroy);
+
+  // Append implicit registers and mem locations
+  for (const auto &Info : CSI) {
+    I.addReg(Info.getReg(), RegState::ImplicitDefine);
+    M68k::addMemOperand(I, Info.getFrameIdx(), 0);
+  }
+
+  return true;
+}

diff  --git a/llvm/lib/Target/M68k/M68kFrameLowering.h b/llvm/lib/Target/M68k/M68kFrameLowering.h
new file mode 100644
index 000000000000..f9236b4af0b0
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kFrameLowering.h
@@ -0,0 +1,172 @@
+//===- M68kFrameLowering.h - Define frame lowering for M68k -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k declaration of TargetFrameLowering class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KFRAMELOWERING_H
+#define LLVM_LIB_TARGET_M68K_M68KFRAMELOWERING_H
+
+#include "M68k.h"
+
+#include "llvm/CodeGen/TargetFrameLowering.h"
+
+namespace llvm {
+class MachineInstrBuilder;
+class MCCFIInstruction;
+class M68kSubtarget;
+class M68kRegisterInfo;
+struct Align;
+
+/// M68k implementation of TargetFrameLowering. Emits prologue/epilogue code,
+/// spills and restores callee-saved registers (via MOVM), and performs stack
+/// pointer adjustments for the M68k backend.
+class M68kFrameLowering : public TargetFrameLowering {
+  // Cached subtarget predicates.
+  const M68kSubtarget &STI;
+  // Instruction info used to build frame setup/destroy instructions.
+  const TargetInstrInfo &TII;
+  // Register info used to query spill order and frame registers.
+  const M68kRegisterInfo *TRI;
+
+  /// Stack slot size in bytes.
+  unsigned SlotSize;
+
+  /// Physical register used as the stack pointer.
+  unsigned StackPtr;
+
+  /// If we're forcing a stack realignment we can't rely on just the frame
+  /// info, we need to know the ABI stack alignment as well in case we have a
+  /// call out.  Otherwise just make sure we have some alignment - we'll go
+  /// with the minimum SlotSize.
+  uint64_t calculateMaxStackAlign(const MachineFunction &MF) const;
+
+  /// Adjusts the stack pointer using LEA, SUB, or ADD.
+  MachineInstrBuilder BuildStackAdjustment(MachineBasicBlock &MBB,
+                                           MachineBasicBlock::iterator MBBI,
+                                           const DebugLoc &DL, int64_t Offset,
+                                           bool InEpilogue) const;
+
+  /// Aligns the stack pointer by ANDing it with -MaxAlign.
+  void BuildStackAlignAND(MachineBasicBlock &MBB,
+                          MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
+                          unsigned Reg, uint64_t MaxAlign) const;
+
+  /// Wraps up getting a CFI index and building a MachineInstr for it.
+  void BuildCFI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                const DebugLoc &DL, const MCCFIInstruction &CFIInst) const;
+
+  /// Emit CFI moves describing where callee-saved registers were spilled.
+  void emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MBBI,
+                                 const DebugLoc &DL) const;
+
+  // NOTE(review): "PSP" slot semantics are defined in the .cpp, which is not
+  // visible here — presumably the offset from SP of a saved-pointer slot;
+  // confirm against the implementation.
+  unsigned getPSPSlotOffsetFromSP(const MachineFunction &MF) const;
+
+public:
+  explicit M68kFrameLowering(const M68kSubtarget &sti, Align Alignment);
+
+  /// Factory returning the frame lowering object for the given subtarget.
+  static const M68kFrameLowering *create(const M68kSubtarget &ST);
+
+  /// This method is called during prolog/epilog code insertion to eliminate
+  /// call frame setup and destroy pseudo instructions (but only if the Target
+  /// is using them).  It is responsible for eliminating these instructions,
+  /// replacing them with concrete instructions.  This method need only be
+  /// implemented if using call frame setup/destroy pseudo instructions.
+  /// Returns an iterator pointing to the instruction after the replaced one.
+  MachineBasicBlock::iterator
+  eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI) const override;
+
+  /// Insert prolog code into the function.
+  void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+  /// Insert epilog code into the function.
+  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+  /// This method determines which of the registers reported by
+  /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
+  /// The default implementation checks populates the \p SavedRegs bitset with
+  /// all registers which are modified in the function, targets may override
+  /// this function to save additional registers.
+  /// This method also sets up the register scavenger ensuring there is a free
+  /// register or a frameindex available.
+  void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+                            RegScavenger *RS = nullptr) const override;
+
+  /// Allows target to override spill slot assignment logic.  If implemented,
+  /// assignCalleeSavedSpillSlots() should assign frame slots to all CSI
+  /// entries and return true.  If this method returns false, spill slots will
+  /// be assigned using generic implementation.  assignCalleeSavedSpillSlots()
+  /// may add, delete or rearrange elements of CSI.
+  bool
+  assignCalleeSavedSpillSlots(MachineFunction &MF,
+                              const TargetRegisterInfo *TRI,
+                              std::vector<CalleeSavedInfo> &CSI) const override;
+
+  /// Issues instruction(s) to spill all callee saved registers and returns
+  /// true if it isn't possible / profitable to do so by issuing a series of
+  /// store instructions via storeRegToStackSlot(). Returns false otherwise.
+  bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MI,
+                                 ArrayRef<CalleeSavedInfo> CSI,
+                                 const TargetRegisterInfo *TRI) const override;
+
+  /// Issues instruction(s) to restore all callee saved registers and returns
+  /// true if it isn't possible / profitable to do so by issuing a series of
+  /// load instructions via loadRegToStackSlot().  Returns false otherwise.
+  bool
+  restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator MI,
+                              MutableArrayRef<CalleeSavedInfo> CSI,
+                              const TargetRegisterInfo *TRI) const override;
+
+  /// Return true if the specified function should have a dedicated frame
+  /// pointer register.  This is true if the function has variable sized
+  /// allocas, if it needs dynamic stack realignment, if frame pointer
+  /// elimination is disabled, or if the frame address is taken.
+  bool hasFP(const MachineFunction &MF) const override;
+
+  /// Under normal circumstances, when a frame pointer is not required, we
+  /// reserve argument space for call sites in the function immediately on
+  /// entry to the current function. This eliminates the need for add/sub sp
+  /// brackets around call sites. Returns true if the call frame is included as
+  /// part of the stack frame.
+  bool hasReservedCallFrame(const MachineFunction &MF) const override;
+
+  /// If there is a reserved call frame, the call frame pseudos can be
+  /// simplified.  Having a FP, as in the default implementation, is not
+  /// sufficient here since we can't always use it.  Use a more nuanced
+  /// condition.
+  bool canSimplifyCallFramePseudos(const MachineFunction &MF) const override;
+
+  // Do we need to perform FI resolution for this function. Normally, this is
+  // required only when the function has any stack objects. However, FI
+  // resolution actually has another job, not apparent from the title - it
+  // resolves callframe setup/destroy that were not simplified earlier.
+  //
+  // So, this is required for M68k functions that have push sequences even
+  // when there are no stack objects.
+  bool needsFrameIndexResolution(const MachineFunction &MF) const override;
+
+  /// This method should return the base register and offset used to reference
+  /// a frame index location. The offset is returned directly, and the base
+  /// register is returned via FrameReg.
+  StackOffset getFrameIndexReference(const MachineFunction &MF, int FI,
+                                     Register &FrameReg) const override;
+
+  /// Check the instruction before/after the passed instruction. If
+  /// it is an ADD/SUB/LEA instruction it is deleted argument and the
+  /// stack adjustment is returned as a positive value for ADD/LEA and
+  /// a negative for SUB.
+  int mergeSPUpdates(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+                     bool doMergeWithPrevious) const;
+
+  /// Emit a series of instructions to increment / decrement the stack
+  /// pointer by a constant value.
+  void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+                    int64_t NumBytes, bool InEpilogue) const;
+};
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp b/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
new file mode 100644
index 000000000000..0076c2647df3
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kISelDAGToDAG.cpp
@@ -0,0 +1,899 @@
+//===- M68kISelDAGToDAG.cpp - M68k Dag to Dag Inst Selector -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines an instruction selector for the M68K target.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68k.h"
+
+#include "M68kMachineFunction.h"
+#include "M68kRegisterInfo.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "m68k-isel"
+
+namespace {
+
+// For reference, the full order of operands for memory references is:
+// (Operand), Displacement, Base, Index, Scale
+struct M68kISelAddressMode {
+  enum class AddrType {
+    ARI,   // Address Register Indirect
+    ARIPI, // Address Register Indirect with Postincrement
+    ARIPD, // Address Register Indirect with Postdecrement
+    ARID,  // Address Register Indirect with Displacement
+    ARII,  // Address Register Indirect with Index
+    PCD,   // Program Counter Indirect with Displacement
+    PCI,   // Program Counter Indirect with Index
+    AL,    // Absolute
+  };
+  AddrType AM;
+
+  enum class Base { RegBase, FrameIndexBase };
+  Base BaseType;
+
+  int64_t Disp;
+
+  // This is really a union, discriminated by BaseType!
+  SDValue BaseReg;
+  int BaseFrameIndex;
+
+  SDValue IndexReg;
+  unsigned Scale;
+
+  // At most one symbolic displacement may be set; see
+  // hasSymbolicDisplacement().
+  const GlobalValue *GV;
+  const Constant *CP;
+  const BlockAddress *BlockAddr;
+  const char *ES;
+  MCSymbol *MCSym;
+  int JT;
+  Align Alignment; // CP alignment.
+
+  unsigned char SymbolFlags; // M68kII::MO_*
+
+  // Single-argument constructor made explicit to prevent accidental
+  // conversions from AddrType.
+  explicit M68kISelAddressMode(AddrType AT)
+      : AM(AT), BaseType(Base::RegBase), Disp(0), BaseFrameIndex(0), IndexReg(),
+        Scale(1), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
+        MCSym(nullptr), JT(-1), Alignment(), SymbolFlags(M68kII::MO_NO_FLAG) {}
+
+  bool hasSymbolicDisplacement() const {
+    return GV != nullptr || CP != nullptr || ES != nullptr ||
+           MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
+  }
+
+  bool hasBase() const {
+    return BaseType == Base::FrameIndexBase || BaseReg.getNode() != nullptr;
+  }
+
+  bool hasFrameIndex() const { return BaseType == Base::FrameIndexBase; }
+
+  bool hasBaseReg() const {
+    return BaseType == Base::RegBase && BaseReg.getNode() != nullptr;
+  }
+
+  bool hasIndexReg() const {
+    return BaseType == Base::RegBase && IndexReg.getNode() != nullptr;
+  }
+
+  /// True if address mode type supports displacement
+  bool isDispAddrType() const {
+    return AM == AddrType::ARII || AM == AddrType::PCI ||
+           AM == AddrType::ARID || AM == AddrType::PCD || AM == AddrType::AL;
+  }
+
+  /// Displacement field width in bits for this addressing mode (0 if the
+  /// mode has no displacement).
+  unsigned getDispSize() const {
+    switch (AM) {
+    default:
+      return 0;
+    case AddrType::ARII:
+    case AddrType::PCI:
+      return 8;
+    // These two in the next chip generations can hold up to 32 bit
+    case AddrType::ARID:
+    case AddrType::PCD:
+      return 16;
+    case AddrType::AL:
+      return 32;
+    }
+  }
+
+  bool hasDisp() const { return getDispSize() != 0; }
+  bool isDisp8() const { return getDispSize() == 8; }
+  bool isDisp16() const { return getDispSize() == 16; }
+  bool isDisp32() const { return getDispSize() == 32; }
+
+  /// Return true if this addressing mode is already PC-relative.
+  bool isPCRelative() const {
+    if (BaseType != Base::RegBase)
+      return false;
+    if (auto *RegNode = dyn_cast_or_null<RegisterSDNode>(BaseReg.getNode()))
+      return RegNode->getReg() == M68k::PC;
+    return false;
+  }
+
+  void setBaseReg(SDValue Reg) {
+    BaseType = Base::RegBase;
+    BaseReg = Reg;
+  }
+
+  void setIndexReg(SDValue Reg) { IndexReg = Reg; }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+  void dump() {
+    dbgs() << "M68kISelAddressMode " << this;
+    dbgs() << "\nDisp: " << Disp;
+    dbgs() << ", BaseReg: ";
+    if (BaseReg.getNode())
+      BaseReg.getNode()->dump();
+    else
+      dbgs() << "null";
+    dbgs() << ", BaseFI: " << BaseFrameIndex;
+    dbgs() << ", IndexReg: ";
+    if (IndexReg.getNode())
+      IndexReg.getNode()->dump();
+    else
+      dbgs() << "null";
+    // Print the scale unconditionally: it qualifies IndexReg, so it is most
+    // relevant precisely when an index register IS present. Previously it
+    // was printed only in the null-index branch (misplaced brace).
+    dbgs() << ", Scale: " << Scale;
+    dbgs() << '\n';
+  }
+#endif
+};
+} // end anonymous namespace
+
+namespace {
+
+/// M68k-specific SelectionDAG instruction selector. Matches addressing modes
+/// (via the M68kISelAddressMode helper) and delegates the rest to the
+/// TableGen-generated matcher.
+class M68kDAGToDAGISel : public SelectionDAGISel {
+public:
+  explicit M68kDAGToDAGISel(M68kTargetMachine &TM)
+      : SelectionDAGISel(TM), Subtarget(nullptr) {}
+
+  StringRef getPassName() const override {
+    return "M68k DAG->DAG Pattern Instruction Selection";
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+  /// Keep a pointer to the M68kSubtarget around so that we can
+  /// make the right decision when generating code for different targets.
+  const M68kSubtarget *Subtarget;
+
+// Include the pieces autogenerated from the target description.
+#include "M68kGenDAGISel.inc"
+
+  /// Return a reference to the TargetMachine, casted to the target-specific
+  /// type.
+  const M68kTargetMachine &getTargetMachine() {
+    return static_cast<const M68kTargetMachine &>(TM);
+  }
+
+  void Select(SDNode *N) override;
+
+  // Insert instructions to initialize the global base register in the
+  // first MBB of the function.
+  void initGlobalBaseReg(MachineFunction &MF);
+
+  /// Try to fold an integer offset into AM's displacement; returns false if
+  /// it would not fit or would clash with a symbolic displacement.
+  bool foldOffsetIntoAddress(uint64_t Offset, M68kISelAddressMode &AM);
+
+  bool matchLoadInAddress(LoadSDNode *N, M68kISelAddressMode &AM);
+  bool matchAddress(SDValue N, M68kISelAddressMode &AM);
+  bool matchAddressBase(SDValue N, M68kISelAddressMode &AM);
+  bool matchAddressRecursively(SDValue N, M68kISelAddressMode &AM,
+                               unsigned Depth);
+  bool matchADD(SDValue &N, M68kISelAddressMode &AM, unsigned Depth);
+  bool matchWrapper(SDValue N, M68kISelAddressMode &AM);
+
+  std::pair<bool, SDNode *> selectNode(SDNode *Node);
+
+  // One selector per M68k addressing mode; each fills the output SDValues
+  // and returns true on a successful match.
+  bool SelectARI(SDNode *Parent, SDValue N, SDValue &Base);
+  bool SelectARIPI(SDNode *Parent, SDValue N, SDValue &Base);
+  bool SelectARIPD(SDNode *Parent, SDValue N, SDValue &Base);
+  bool SelectARID(SDNode *Parent, SDValue N, SDValue &Imm, SDValue &Base);
+  bool SelectARII(SDNode *Parent, SDValue N, SDValue &Imm, SDValue &Base,
+                  SDValue &Index);
+  bool SelectAL(SDNode *Parent, SDValue N, SDValue &Sym);
+  bool SelectPCD(SDNode *Parent, SDValue N, SDValue &Imm);
+  bool SelectPCI(SDNode *Parent, SDValue N, SDValue &Imm, SDValue &Index);
+
+  // If Address Mode represents Frame Index store FI in Disp and
+  // Displacement bit size in Base. These values are read symmetrically by
+  // M68kRegisterInfo::eliminateFrameIndex method
+  inline bool getFrameIndexAddress(M68kISelAddressMode &AM, const SDLoc &DL,
+                                   SDValue &Disp, SDValue &Base) {
+    if (AM.BaseType == M68kISelAddressMode::Base::FrameIndexBase) {
+      Disp = getI32Imm(AM.Disp, DL);
+      Base = CurDAG->getTargetFrameIndex(
+          AM.BaseFrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
+      return true;
+    }
+
+    return false;
+  }
+
+  // Gets a symbol plus optional displacement
+  inline bool getSymbolicDisplacement(M68kISelAddressMode &AM, const SDLoc &DL,
+                                      SDValue &Sym) {
+    if (AM.GV) {
+      Sym = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(), MVT::i32, AM.Disp,
+                                           AM.SymbolFlags);
+      return true;
+    }
+
+    if (AM.CP) {
+      Sym = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
+                                          AM.Disp, AM.SymbolFlags);
+      return true;
+    }
+
+    if (AM.ES) {
+      assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
+      Sym = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
+      return true;
+    }
+
+    if (AM.MCSym) {
+      assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
+      assert(AM.SymbolFlags == 0 && "MCSym does not support symbol flags.");
+      Sym = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
+      return true;
+    }
+
+    if (AM.JT != -1) {
+      assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
+      Sym = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
+      return true;
+    }
+
+    if (AM.BlockAddr) {
+      Sym = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
+                                          AM.SymbolFlags);
+      return true;
+    }
+
+    return false;
+  }
+
+  /// Return a target constant with the specified value of type i8.
+  inline SDValue getI8Imm(int64_t Imm, const SDLoc &DL) {
+    return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
+  }
+
+  /// Return a target constant with the specified value of type i16.
+  inline SDValue getI16Imm(int64_t Imm, const SDLoc &DL) {
+    return CurDAG->getTargetConstant(Imm, DL, MVT::i16);
+  }
+
+  /// Return a target constant with the specified value, of type i32.
+  inline SDValue getI32Imm(int64_t Imm, const SDLoc &DL) {
+    return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
+  }
+
+  /// Return a reference to the TargetInstrInfo, casted to the target-specific
+  /// type.
+  const M68kInstrInfo *getInstrInfo() const {
+    return Subtarget->getInstrInfo();
+  }
+
+  /// Return an SDNode that returns the value of the global base register.
+  /// Output instructions required to initialize the global base register,
+  /// if necessary.
+  SDNode *getGlobalBaseReg();
+};
+} // namespace
+
+bool M68kDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
+  // Cache the subtarget for this function before running selection. Use the
+  // typed MachineFunction::getSubtarget<T>() accessor instead of a manual
+  // static_cast — it performs the same cast and is the idiomatic form.
+  Subtarget = &MF.getSubtarget<M68kSubtarget>();
+  return SelectionDAGISel::runOnMachineFunction(MF);
+}
+
+/// This pass converts a legalized DAG into a M68k-specific DAG,
+/// ready for instruction scheduling.
+/// \param TM the target machine that owns the subtargets used during
+///           selection. Caller owns the returned pass.
+FunctionPass *llvm::createM68kISelDag(M68kTargetMachine &TM) {
+  return new M68kDAGToDAGISel(TM);
+}
+
+/// Returns true if AM's displacement would still fit into the mode's
+/// displacement field once the frame index is resolved.
+static bool doesDispFitFI(M68kISelAddressMode &AM) {
+  // Reserve one bit of headroom so that the resolved FI offset still fits.
+  return AM.isDispAddrType() && isIntN(AM.getDispSize() - 1, AM.Disp);
+}
+
+/// Returns true if Val fits into the displacement field of AM's addressing
+/// mode (modes without a displacement never fit).
+static bool doesDispFit(M68kISelAddressMode &AM, int64_t Val) {
+  return AM.isDispAddrType() && isIntN(AM.getDispSize(), Val);
+}
+
+/// Return an SDNode that returns the value of the global base register.
+/// Output instructions required to initialize the global base register,
+/// if necessary.
+SDNode *M68kDAGToDAGISel::getGlobalBaseReg() {
+  unsigned BaseReg = getInstrInfo()->getGlobalBaseReg(MF);
+  const auto &Layout = MF->getDataLayout();
+  return CurDAG->getRegister(BaseReg, TLI->getPointerTy(Layout)).getNode();
+}
+
+/// Try to add Offset to the displacement already accumulated in AM.
+/// Returns true (and updates AM.Disp) only if the combined value fits the
+/// mode's displacement field.
+bool M68kDAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
+                                             M68kISelAddressMode &AM) {
+  // Cannot combine ExternalSymbol displacements with integer offsets.
+  if (Offset != 0 && (AM.ES || AM.MCSym))
+    return false;
+
+  int64_t NewDisp = AM.Disp + Offset;
+  if (!doesDispFit(AM, NewDisp))
+    return false;
+
+  AM.Disp = NewDisp;
+  return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers
+//===----------------------------------------------------------------------===//
+
+/// Helper for MatchAddress. Add the specified node to the
+/// specified addressing mode without any further recursion.
+bool M68kDAGToDAGISel::matchAddressBase(SDValue N, M68kISelAddressMode &AM) {
+  if (!AM.hasBase()) {
+    // Base slot is free: record N there.
+    AM.BaseType = M68kISelAddressMode::Base::RegBase;
+    AM.BaseReg = N;
+    return true;
+  }
+
+  // Base is occupied; the index slot (scale 1) is the only fallback.
+  if (AM.hasIndexReg())
+    return false;
+
+  AM.IndexReg = N;
+  AM.Scale = 1;
+  return true;
+}
+
+/// TODO Add TLS support
+/// Folding a load into an addressing mode (e.g. for TLS address loads) is
+/// not implemented yet, so this matcher always fails.
+bool M68kDAGToDAGISel::matchLoadInAddress(LoadSDNode *N,
+                                          M68kISelAddressMode &AM) {
+  return false;
+}
+
+/// Core recursive address matcher: tries to absorb N into AM, recursing
+/// through ADD/OR/wrapper nodes, and falls back to matchAddressBase() when
+/// the recursion limit is hit or no pattern applies. Returns true if N was
+/// absorbed into AM.
+bool M68kDAGToDAGISel::matchAddressRecursively(SDValue N,
+                                               M68kISelAddressMode &AM,
+                                               unsigned Depth) {
+  SDLoc DL(N);
+
+  // Limit recursion.
+  if (Depth > 5)
+    return matchAddressBase(N, AM);
+
+  // If this is already a %PC relative address, we can only merge immediates
+  // into it.  Instead of handling this in every case, we handle it here.
+  // PC relative addressing: %PC + 16-bit displacement!
+  if (AM.isPCRelative()) {
+    // FIXME JumpTable and ExternalSymbol address currently don't like
+    // displacements.  It isn't very important, but should be fixed for
+    // consistency.
+
+    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
+      if (foldOffsetIntoAddress(Cst->getSExtValue(), AM))
+        return true;
+    return false;
+  }
+
+  switch (N.getOpcode()) {
+  default:
+    break;
+
+  case ISD::Constant: {
+    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
+    if (foldOffsetIntoAddress(Val, AM))
+      return true;
+    break;
+  }
+
+  case M68kISD::Wrapper:
+  case M68kISD::WrapperPC:
+    if (matchWrapper(N, AM))
+      return true;
+    break;
+
+  case ISD::LOAD:
+    if (matchLoadInAddress(cast<LoadSDNode>(N), AM))
+      return true;
+    break;
+
+  case ISD::OR:
+    // We want to look through a transform in InstCombine and DAGCombiner that
+    // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
+    // Example: (or (and x, 1), (shl y, 3)) --> (add (and x, 1), (shl y, 3))
+    // An 'lea' can then be used to match the shift (multiply) and add:
+    // and $1, %esi
+    // lea (%rsi, %rdi, 8), %rax
+    if (CurDAG->haveNoCommonBitsSet(N.getOperand(0), N.getOperand(1)) &&
+        matchADD(N, AM, Depth))
+      return true;
+    break;
+
+  case ISD::ADD:
+    if (matchADD(N, AM, Depth))
+      return true;
+    break;
+
+  case ISD::FrameIndex:
+    // A frame index can become the base only if no base register has been
+    // chosen yet and the displacement will survive FI elimination.
+    if (AM.isDispAddrType() &&
+        AM.BaseType == M68kISelAddressMode::Base::RegBase &&
+        AM.BaseReg.getNode() == nullptr && doesDispFitFI(AM)) {
+      AM.BaseType = M68kISelAddressMode::Base::FrameIndexBase;
+      AM.BaseFrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
+      return true;
+    }
+    break;
+  }
+
+  return matchAddressBase(N, AM);
+}
+
+/// Add the specified node to the specified addressing mode. Returns true if
+/// the node was matched into AM, false otherwise (callers test the result
+/// with `if (!matchAddress(...))`). This just pattern matches for the
+/// addressing mode.
+bool M68kDAGToDAGISel::matchAddress(SDValue N, M68kISelAddressMode &AM) {
+  // TODO: Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
+  // a smaller encoding and avoids a scaled-index.
+  // And make sure it is an indexed mode
+
+  // TODO: Post-processing: Convert foo to foo(%pc), even in non-PIC mode,
+  // because it has a smaller encoding.
+  // Make sure this must be done only if PC* modes are currently being matched
+  return matchAddressRecursively(N, AM, 0);
+}
+
+/// Try to fold both operands of an ADD (or add-like OR) into AM, retrying
+/// with the operands commuted, and finally falling back to using the two
+/// operands as base and index registers. N may be replaced if the node was
+/// CSE'd during recursion. Returns true on success.
+bool M68kDAGToDAGISel::matchADD(SDValue &N, M68kISelAddressMode &AM,
+                                unsigned Depth) {
+  // Add an artificial use to this node so that we can keep track of
+  // it if it gets CSE'd with a different node.
+  HandleSDNode Handle(N);
+
+  M68kISelAddressMode Backup = AM;
+  if (matchAddressRecursively(N.getOperand(0), AM, Depth + 1) &&
+      matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth + 1)) {
+    return true;
+  }
+  AM = Backup;
+
+  // Try again after commuting the operands.
+  if (matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth + 1) &&
+      matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1)) {
+    return true;
+  }
+  AM = Backup;
+
+  // If we couldn't fold both operands into the address at the same time,
+  // see if we can just put each operand into a register and fold at least
+  // the add.
+  if (!AM.hasBase() && !AM.hasIndexReg()) {
+    N = Handle.getValue();
+    AM.BaseReg = N.getOperand(0);
+    AM.IndexReg = N.getOperand(1);
+    AM.Scale = 1;
+    return true;
+  }
+
+  N = Handle.getValue();
+  return false;
+}
+
+/// Try to match M68kISD::Wrapper and M68kISD::WrapperPC nodes into an
+/// addressing mode. These wrap things that will resolve down into a symbol
+/// reference. Returns true if the wrapped symbol was absorbed into AM,
+/// false otherwise. (The code below returns true on the successful paths;
+/// callers treat a true result as a match.)
+bool M68kDAGToDAGISel::matchWrapper(SDValue N, M68kISelAddressMode &AM) {
+  // If the addressing mode already has a symbol as the displacement, we can
+  // never match another symbol.
+  if (AM.hasSymbolicDisplacement())
+    return false;
+
+  SDValue N0 = N.getOperand(0);
+
+  if (N.getOpcode() == M68kISD::WrapperPC) {
+
+    // If cannot match here just restore the old version
+    M68kISelAddressMode Backup = AM;
+
+    if (AM.hasBase()) {
+      return false;
+    }
+
+    if (auto *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+      AM.GV = G->getGlobal();
+      AM.SymbolFlags = G->getTargetFlags();
+      if (!foldOffsetIntoAddress(G->getOffset(), AM)) {
+        AM = Backup;
+        return false;
+      }
+    } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+      AM.CP = CP->getConstVal();
+      AM.Alignment = CP->getAlign();
+      AM.SymbolFlags = CP->getTargetFlags();
+      if (!foldOffsetIntoAddress(CP->getOffset(), AM)) {
+        AM = Backup;
+        return false;
+      }
+    } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+      AM.ES = S->getSymbol();
+      AM.SymbolFlags = S->getTargetFlags();
+    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
+      AM.MCSym = S->getMCSymbol();
+    } else if (auto *J = dyn_cast<JumpTableSDNode>(N0)) {
+      AM.JT = J->getIndex();
+      AM.SymbolFlags = J->getTargetFlags();
+    } else if (auto *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+      AM.BlockAddr = BA->getBlockAddress();
+      AM.SymbolFlags = BA->getTargetFlags();
+      if (!foldOffsetIntoAddress(BA->getOffset(), AM)) {
+        AM = Backup;
+        return false;
+      }
+    } else
+      llvm_unreachable("Unhandled symbol reference node.");
+
+    // PC-relative mode: use %PC as the base register.
+    AM.setBaseReg(CurDAG->getRegister(M68k::PC, MVT::i32));
+    return true;
+  }
+
+  // This wrapper requires 32bit disp/imm field for Medium CM
+  if (!AM.isDisp32()) {
+    return false;
+  }
+
+  if (N.getOpcode() == M68kISD::Wrapper) {
+    if (auto *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+      AM.GV = G->getGlobal();
+      AM.Disp += G->getOffset();
+      AM.SymbolFlags = G->getTargetFlags();
+    } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+      AM.CP = CP->getConstVal();
+      AM.Alignment = CP->getAlign();
+      AM.Disp += CP->getOffset();
+      AM.SymbolFlags = CP->getTargetFlags();
+    } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+      AM.ES = S->getSymbol();
+      AM.SymbolFlags = S->getTargetFlags();
+    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
+      AM.MCSym = S->getMCSymbol();
+    } else if (auto *J = dyn_cast<JumpTableSDNode>(N0)) {
+      AM.JT = J->getIndex();
+      AM.SymbolFlags = J->getTargetFlags();
+    } else if (auto *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+      AM.BlockAddr = BA->getBlockAddress();
+      AM.Disp += BA->getOffset();
+      AM.SymbolFlags = BA->getTargetFlags();
+    } else
+      llvm_unreachable("Unhandled symbol reference node.");
+    return true;
+  }
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Selectors
+//===----------------------------------------------------------------------===//
+
+/// Main selection hook: handles the few target nodes the generated matcher
+/// cannot, then hands everything else to SelectCode().
+void M68kDAGToDAGISel::Select(SDNode *Node) {
+  SDLoc DL(Node);
+
+  LLVM_DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
+
+  // Nodes that already carry a machine opcode were selected earlier.
+  if (Node->isMachineOpcode()) {
+    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
+    Node->setNodeId(-1);
+    return;
+  }
+
+  // Custom selection for nodes with no TableGen pattern.
+  if (Node->getOpcode() == M68kISD::GLOBAL_BASE_REG) {
+    ReplaceNode(Node, getGlobalBaseReg());
+    return;
+  }
+
+  // Everything else goes through the autogenerated matcher.
+  SelectCode(Node);
+}
+
+/// Address Register Indirect with Postincrement, (An)+. Not implemented:
+/// always fails so other addressing modes get a chance.
+bool M68kDAGToDAGISel::SelectARIPI(SDNode *Parent, SDValue N, SDValue &Base) {
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::ARIPI: ");
+  LLVM_DEBUG(dbgs() << "NOT IMPLEMENTED\n");
+  return false;
+}
+
+/// Address Register Indirect with Predecrement, -(An). Not implemented:
+/// always fails so other addressing modes get a chance.
+bool M68kDAGToDAGISel::SelectARIPD(SDNode *Parent, SDValue N, SDValue &Base) {
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::ARIPD: ");
+  LLVM_DEBUG(dbgs() << "NOT IMPLEMENTED\n");
+  return false;
+}
+
+/// Match Address Register Indirect with Displacement, (d16,An).
+/// On success fills Disp (16-bit immediate, a frame index, or a symbol) and
+/// Base (the address register or target frame index) and returns true.
+bool M68kDAGToDAGISel::SelectARID(SDNode *Parent, SDValue N, SDValue &Disp,
+                                  SDValue &Base) {
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::ARID: ");
+  M68kISelAddressMode AM(M68kISelAddressMode::AddrType::ARID);
+
+  if (!matchAddress(N, AM))
+    return false;
+
+  if (AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match PC relative address\n");
+    return false;
+  }
+
+  // If this is a frame index, grab it
+  if (getFrameIndexAddress(AM, SDLoc(N), Disp, Base)) {
+    LLVM_DEBUG(dbgs() << "SUCCESS matched FI\n");
+    return true;
+  }
+
+  // An index register cannot be encoded in this mode; ARII handles that.
+  if (AM.hasIndexReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index\n");
+    return false;
+  }
+
+  if (!AM.hasBaseReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: No Base reg\n");
+    return false;
+  }
+
+  if (getSymbolicDisplacement(AM, SDLoc(N), Disp)) {
+    assert(!AM.Disp && "Should not be any displacement");
+    LLVM_DEBUG(dbgs() << "SUCCESS, matched Symbol\n");
+    return true;
+  }
+
+  // Give a chance to AddrType::ARI
+  if (AM.Disp == 0) {
+    LLVM_DEBUG(dbgs() << "REJECT: No displacement\n");
+    return false;
+  }
+
+  Base = AM.BaseReg;
+  Disp = getI16Imm(AM.Disp, SDLoc(N));
+
+  LLVM_DEBUG(dbgs() << "SUCCESS\n");
+  return true;
+}
+
+/// Returns true if N (or, for add chains, any of N's operands, recursively)
+/// produces an address-like value: a wrapped symbol or the global base
+/// register. Used to decide which operand should act as the base register.
+static bool isAddressBase(const SDValue &N) {
+  unsigned Opcode = N.getOpcode();
+
+  if (Opcode == M68kISD::Wrapper || Opcode == M68kISD::WrapperPC ||
+      Opcode == M68kISD::GLOBAL_BASE_REG)
+    return true;
+
+  if (Opcode == ISD::ADD || Opcode == ISD::ADDC)
+    return llvm::any_of(N.getNode()->ops(),
+                        [](const SDUse &U) { return isAddressBase(U.get()); });
+
+  return false;
+}
+
+/// Match Address Register Indirect with Index, (d8,An,Xn). Requires both a
+/// base and an index register; fills Disp (8-bit immediate), Base, and
+/// Index on success.
+bool M68kDAGToDAGISel::SelectARII(SDNode *Parent, SDValue N, SDValue &Disp,
+                                  SDValue &Base, SDValue &Index) {
+  M68kISelAddressMode AM(M68kISelAddressMode::AddrType::ARII);
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::ARII: ");
+
+  if (!matchAddress(N, AM))
+    return false;
+
+  if (AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: PC relative\n");
+    return false;
+  }
+
+  if (!AM.hasIndexReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: No Index\n");
+    return false;
+  }
+
+  if (!AM.hasBaseReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: No Base\n");
+    return false;
+  }
+
+  // Prefer the address-producing operand as the base: if only the matched
+  // index looks like an address, swap the two.
+  if (!isAddressBase(AM.BaseReg) && isAddressBase(AM.IndexReg)) {
+    Base = AM.IndexReg;
+    Index = AM.BaseReg;
+  } else {
+    Base = AM.BaseReg;
+    Index = AM.IndexReg;
+  }
+
+  if (AM.hasSymbolicDisplacement()) {
+    LLVM_DEBUG(dbgs() << "REJECT, Cannot match symbolic displacement\n");
+    return false;
+  }
+
+  // The idea here is that we want to use AddrType::ARII without displacement
+  // only if necessary like memory operations, otherwise this must be lowered
+  // into addition
+  if (AM.Disp == 0 && (!Parent || (Parent->getOpcode() != ISD::LOAD &&
+                                   Parent->getOpcode() != ISD::STORE))) {
+    LLVM_DEBUG(dbgs() << "REJECT: Displacement is Zero\n");
+    return false;
+  }
+
+  Disp = getI8Imm(AM.Disp, SDLoc(N));
+
+  LLVM_DEBUG(dbgs() << "SUCCESS\n");
+  return true;
+}
+
+bool M68kDAGToDAGISel::SelectAL(SDNode *Parent, SDValue N, SDValue &Sym) {
+  // Absolute Long addressing, i.e. (xxx).L: match N as either a symbolic
+  // reference or a plain 32-bit absolute address. On success fills Sym and
+  // returns true.
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::AL: ");
+  M68kISelAddressMode AM(M68kISelAddressMode::AddrType::AL);
+
+  if (!matchAddress(N, AM)) {
+    LLVM_DEBUG(dbgs() << "REJECT: Match failed\n");
+    return false;
+  }
+
+  // PC-relative forms are handled by the PCD/PCI selectors instead.
+  if (AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match PC relative address\n");
+    return false;
+  }
+
+  // Absolute addressing uses neither a base nor an index register.
+  if (AM.hasBase()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match Base\n");
+    return false;
+  }
+
+  if (AM.hasIndexReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index\n");
+    return false;
+  }
+
+  if (getSymbolicDisplacement(AM, SDLoc(N), Sym)) {
+    LLVM_DEBUG(dbgs() << "SUCCESS: Matched symbol\n");
+    return true;
+  }
+
+  // Otherwise a non-zero displacement serves as the absolute address itself.
+  if (AM.Disp) {
+    Sym = getI32Imm(AM.Disp, SDLoc(N));
+    LLVM_DEBUG(dbgs() << "SUCCESS\n");
+    return true;
+  }
+
+  LLVM_DEBUG(dbgs() << "REJECT: Not Symbol or Disp\n");
+  return false;
+}
+
+bool M68kDAGToDAGISel::SelectPCD(SDNode *Parent, SDValue N, SDValue &Disp) {
+  // Program Counter Indirect with Displacement, i.e. (d16,PC): match N as a
+  // PC-relative 16-bit displacement (or symbolic reference).
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::PCD: ");
+  M68kISelAddressMode AM(M68kISelAddressMode::AddrType::PCD);
+
+  if (!matchAddress(N, AM))
+    return false;
+
+  if (!AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Not PC relative\n");
+    return false;
+  }
+
+  // Indexed PC-relative addresses are matched by SelectPCI.
+  if (AM.hasIndexReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index\n");
+    return false;
+  }
+
+  if (getSymbolicDisplacement(AM, SDLoc(N), Disp)) {
+    LLVM_DEBUG(dbgs() << "SUCCESS, matched Symbol\n");
+    return true;
+  }
+
+  Disp = getI16Imm(AM.Disp, SDLoc(N));
+
+  LLVM_DEBUG(dbgs() << "SUCCESS\n");
+  return true;
+}
+
+bool M68kDAGToDAGISel::SelectPCI(SDNode *Parent, SDValue N, SDValue &Disp,
+                                 SDValue &Index) {
+  // Program Counter Indirect with Index, i.e. (d8,PC,Xn): match N as a
+  // PC-relative 8-bit displacement (or symbol) plus an index register.
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::PCI: ");
+  M68kISelAddressMode AM(M68kISelAddressMode::AddrType::PCI);
+
+  if (!matchAddress(N, AM))
+    return false;
+
+  if (!AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Not PC relative\n");
+    return false;
+  }
+
+  if (!AM.hasIndexReg()) {
+    LLVM_DEBUG(dbgs() << "REJECT: No Index\n");
+    return false;
+  }
+
+  Index = AM.IndexReg;
+
+  if (getSymbolicDisplacement(AM, SDLoc(N), Disp)) {
+    assert(!AM.Disp && "Should not be any displacement");
+    LLVM_DEBUG(dbgs() << "SUCCESS, matched Symbol\n");
+    return true;
+  }
+
+  Disp = getI8Imm(AM.Disp, SDLoc(N));
+
+  LLVM_DEBUG(dbgs() << "SUCCESS\n");
+  return true;
+}
+
+bool M68kDAGToDAGISel::SelectARI(SDNode *Parent, SDValue N, SDValue &Base) {
+  // Address Register Indirect, i.e. (An): match N as a bare base register
+  // with no displacement, index or symbol.
+  LLVM_DEBUG(dbgs() << "Selecting AddrType::ARI: ");
+  M68kISelAddressMode AM(M68kISelAddressMode::AddrType::ARI);
+
+  if (!matchAddress(N, AM)) {
+    LLVM_DEBUG(dbgs() << "REJECT: Match failed\n");
+    return false;
+  }
+
+  if (AM.isPCRelative()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match PC relative address\n");
+    return false;
+  }
+
+  // AddrType::ARI does not use these
+  if (AM.hasIndexReg() || AM.Disp != 0) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match Index or Disp\n");
+    return false;
+  }
+
+  // Must be matched by AddrType::AL
+  if (AM.hasSymbolicDisplacement()) {
+    LLVM_DEBUG(dbgs() << "REJECT: Cannot match Symbolic Disp\n");
+    return false;
+  }
+
+  if (AM.hasBaseReg()) {
+    Base = AM.BaseReg;
+    LLVM_DEBUG(dbgs() << "SUCCESS\n");
+    return true;
+  }
+
+  return false;
+}

diff  --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
new file mode 100644
index 000000000000..cf9f1406b0c1
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -0,0 +1,3227 @@
+//===-- M68kISelLowering.cpp - M68k DAG Lowering Impl ------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the interfaces that M68k uses to lower LLVM code into a
+/// selection DAG.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kISelLowering.h"
+#include "M68kCallingConv.h"
+#include "M68kMachineFunction.h"
+#include "M68kSubtarget.h"
+#include "M68kTargetMachine.h"
+#include "M68kTargetObjectFile.h"
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "M68k-isel"
+
+STATISTIC(NumTailCalls, "Number of tail calls");
+
+/// Configure M68k-specific lowering: register classes, load-extension and
+/// truncating-store rules, and which generic ISD operations are Legal,
+/// Promote, Expand, Custom or LibCall on this subtarget.
+M68kTargetLowering::M68kTargetLowering(const M68kTargetMachine &TM,
+                                       const M68kSubtarget &STI)
+    : TargetLowering(TM), Subtarget(STI), TM(TM) {
+
+  MVT PtrVT = MVT::i32;
+
+  setBooleanContents(ZeroOrOneBooleanContent);
+
+  auto *RegInfo = Subtarget.getRegisterInfo();
+  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
+
+  // Set up the register classes.
+  addRegisterClass(MVT::i8, &M68k::DR8RegClass);
+  addRegisterClass(MVT::i16, &M68k::XR16RegClass);
+  addRegisterClass(MVT::i32, &M68k::XR32RegClass);
+
+  // i1 loads are promoted to a wider integer load of any extension kind.
+  for (auto VT : MVT::integer_valuetypes()) {
+    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
+  }
+
+  // We don't accept any truncstore of integer registers.
+  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
+  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
+  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
+  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
+  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
+  setTruncStoreAction(MVT::i16, MVT::i8, Expand);
+
+  // 32-bit multiply needs at least a 68020; otherwise fall back to a libcall.
+  setOperationAction(ISD::MUL, MVT::i8, Promote);
+  setOperationAction(ISD::MUL, MVT::i16, Legal);
+  if (Subtarget.atLeastM68020())
+    setOperationAction(ISD::MUL, MVT::i32, Legal);
+  else
+    setOperationAction(ISD::MUL, MVT::i32, LibCall);
+  setOperationAction(ISD::MUL, MVT::i64, LibCall);
+
+  for (auto OP :
+       {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::UDIVREM, ISD::SDIVREM,
+        ISD::MULHS, ISD::MULHU, ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
+    setOperationAction(OP, MVT::i8, Promote);
+    setOperationAction(OP, MVT::i16, Legal);
+    setOperationAction(OP, MVT::i32, LibCall);
+  }
+
+  // NOTE(review): this overrides the i8/i16 settings from the loop above for
+  // the *MUL_LOHI opcodes — presumably intentional; confirm.
+  for (auto OP : {ISD::UMUL_LOHI, ISD::SMUL_LOHI}) {
+    setOperationAction(OP, MVT::i8, Expand);
+    setOperationAction(OP, MVT::i16, Expand);
+  }
+
+  // FIXME It would be better to use a custom lowering
+  for (auto OP : {ISD::SMULO, ISD::UMULO}) {
+    setOperationAction(OP, MVT::i8, Expand);
+    setOperationAction(OP, MVT::i16, Expand);
+    setOperationAction(OP, MVT::i32, Expand);
+  }
+
+  // Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
+  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
+    setOperationAction(ISD::ADDC, VT, Custom);
+    setOperationAction(ISD::ADDE, VT, Custom);
+    setOperationAction(ISD::SUBC, VT, Custom);
+    setOperationAction(ISD::SUBE, VT, Custom);
+  }
+
+  // SADDO and friends are legal with this setup, i hope
+  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
+    setOperationAction(ISD::SADDO, VT, Custom);
+    setOperationAction(ISD::UADDO, VT, Custom);
+    setOperationAction(ISD::SSUBO, VT, Custom);
+    setOperationAction(ISD::USUBO, VT, Custom);
+  }
+
+  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
+
+  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
+    setOperationAction(ISD::BR_CC, VT, Expand);
+    setOperationAction(ISD::SELECT, VT, Custom);
+    setOperationAction(ISD::SELECT_CC, VT, Expand);
+    setOperationAction(ISD::SETCC, VT, Custom);
+    setOperationAction(ISD::SETCCCARRY, VT, Custom);
+  }
+
+  // No native bit-manipulation instructions for these; expand to generic code.
+  for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
+    setOperationAction(ISD::BSWAP, VT, Expand);
+    setOperationAction(ISD::CTTZ, VT, Expand);
+    setOperationAction(ISD::CTLZ, VT, Expand);
+    setOperationAction(ISD::CTPOP, VT, Expand);
+  }
+
+  // Addresses of globals, jump tables etc. need custom wrapping/PIC handling.
+  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
+  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
+  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
+  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
+  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
+
+  setOperationAction(ISD::VASTART, MVT::Other, Custom);
+  setOperationAction(ISD::VAEND, MVT::Other, Expand);
+  setOperationAction(ISD::VAARG, MVT::Other, Expand);
+  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+
+  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
+
+  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
+
+  computeRegisterProperties(STI.getRegisterInfo());
+
+  // 2^2 bytes
+  // FIXME can it be just 2^1?
+  setMinFunctionAlignment(Align::Constant<2>());
+}
+
+EVT M68kTargetLowering::getSetCCResultType(const DataLayout &DL,
+                                           LLVMContext &Context, EVT VT) const {
+  // M68k SETcc produces either 0x00 or 0xFF, so i8 is the natural result type
+  // regardless of the compared value type.
+  return MVT::i8;
+}
+
+MVT M68kTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
+                                               EVT Ty) const {
+  // Shift amounts use the same type as the shifted value when it is a simple
+  // MVT; otherwise fall back to a pointer-sized integer.
+  if (Ty.isSimple()) {
+    return Ty.getSimpleVT();
+  }
+  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
+}
+
+#include "M68kGenCallingConv.inc"
+
+// Classifies how (if at all) a call or function returns a struct via sret:
+// not at all, through a register, or through the stack.
+enum StructReturnType { NotStructReturn, RegStructReturn, StackStructReturn };
+
+/// Determines whether a call uses struct return semantics, based on the
+/// argument flags of its first outgoing argument.
+static StructReturnType
+callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
+  if (Outs.empty())
+    return NotStructReturn;
+
+  const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
+  if (!Flags.isSRet())
+    return NotStructReturn;
+  if (Flags.isInReg())
+    return RegStructReturn;
+  return StackStructReturn;
+}
+
+/// Determines whether a function uses struct return semantics, based on the
+/// argument flags of its first incoming argument (mirror of
+/// callIsStructReturn for the callee side).
+static StructReturnType
+argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
+  if (Ins.empty())
+    return NotStructReturn;
+
+  const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
+  if (!Flags.isSRet())
+    return NotStructReturn;
+  if (Flags.isInReg())
+    return RegStructReturn;
+  return StackStructReturn;
+}
+
+/// Make a copy of an aggregate at address specified by "Src" to address
+/// "Dst" with size and alignment information specified by the specific
+/// parameter attribute. The copy will be passed as a byval function parameter.
+static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
+                                         SDValue Chain, ISD::ArgFlagsTy Flags,
+                                         SelectionDAG &DAG, const SDLoc &DL) {
+  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
+
+  // Always inline the memcpy: byval copies are part of the call sequence and
+  // must not themselves become calls.
+  return DAG.getMemcpy(
+      Chain, DL, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
+      /*isVolatile=*/false, /*AlwaysInline=*/true,
+      /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
+}
+
+/// Return true if the calling convention is one that we can guarantee TCO for.
+/// M68k does not implement guaranteed tail-call optimization for any
+/// convention yet, so this is always false.
+static bool canGuaranteeTCO(CallingConv::ID CC) { return false; }
+
+/// Return true if we might ever do TCO for calls with this calling convention.
+static bool mayTailCallThisCC(CallingConv::ID CC) {
+  switch (CC) {
+  // C calling conventions:
+  case CallingConv::C:
+    return true;
+  default:
+    // Anything else only if the convention supports guaranteed TCO.
+    return canGuaranteeTCO(CC);
+  }
+}
+
+/// Return true if the function is being made into a tailcall target by
+/// changing its ABI. Requires both the -tailcallopt option and a convention
+/// that can guarantee TCO.
+static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
+  return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
+}
+
+/// Return true if the given stack call argument is already available in the
+/// same position (relatively) of the caller's incoming argument stack.
+/// Used by sibcall eligibility checks so arguments need not be re-stored.
+static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
+                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
+                                const MachineRegisterInfo *MRI,
+                                const M68kInstrInfo *TII,
+                                const CCValAssign &VA) {
+  unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
+
+  for (;;) {
+    // Look through nodes that don't alter the bits of the incoming value.
+    unsigned Op = Arg.getOpcode();
+    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
+      Arg = Arg.getOperand(0);
+      continue;
+    }
+    if (Op == ISD::TRUNCATE) {
+      // A truncate of an AssertZext back to the original type is a no-op.
+      const SDValue &TruncInput = Arg.getOperand(0);
+      if (TruncInput.getOpcode() == ISD::AssertZext &&
+          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
+              Arg.getValueType()) {
+        Arg = TruncInput.getOperand(0);
+        continue;
+      }
+    }
+    break;
+  }
+
+  // Try to recover the frame index the value was loaded from / points to.
+  int FI = INT_MAX;
+  if (Arg.getOpcode() == ISD::CopyFromReg) {
+    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
+    if (!Register::isVirtualRegister(VR))
+      return false;
+    MachineInstr *Def = MRI->getVRegDef(VR);
+    if (!Def)
+      return false;
+    if (!Flags.isByVal()) {
+      if (!TII->isLoadFromStackSlot(*Def, FI))
+        return false;
+    } else {
+      // For byval, the vreg must be an LEA of a frame index.
+      unsigned Opcode = Def->getOpcode();
+      if ((Opcode == M68k::LEA32p || Opcode == M68k::LEA32f) &&
+          Def->getOperand(1).isFI()) {
+        FI = Def->getOperand(1).getIndex();
+        Bytes = Flags.getByValSize();
+      } else
+        return false;
+    }
+  } else if (auto *Ld = dyn_cast<LoadSDNode>(Arg)) {
+    if (Flags.isByVal())
+      // ByVal argument is passed in as a pointer but it's now being
+      // dereferenced. e.g.
+      // define @foo(%struct.X* %A) {
+      //   tail call @bar(%struct.X* byval %A)
+      // }
+      return false;
+    SDValue Ptr = Ld->getBasePtr();
+    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
+    if (!FINode)
+      return false;
+    FI = FINode->getIndex();
+  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
+    FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
+    FI = FINode->getIndex();
+    Bytes = Flags.getByValSize();
+  } else
+    return false;
+
+  assert(FI != INT_MAX);
+  // Only fixed objects (incoming arguments) can match the caller's slot.
+  if (!MFI.isFixedObjectIndex(FI))
+    return false;
+
+  if (Offset != MFI.getObjectOffset(FI))
+    return false;
+
+  if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
+    // If the argument location is wider than the argument type, check that any
+    // extension flags match.
+    if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
+        Flags.isSExt() != MFI.isObjectSExt(FI)) {
+      return false;
+    }
+  }
+
+  return Bytes == MFI.getObjectSize(FI);
+}
+
+/// Return a frame index node for the slot holding the return address,
+/// lazily creating the fixed stack object (cached in function info) on
+/// first use.
+SDValue
+M68kTargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
+  int ReturnAddrIndex = FuncInfo->getRAIndex();
+
+  if (ReturnAddrIndex == 0) {
+    // Set up a frame object for the return address.
+    unsigned SlotSize = Subtarget.getSlotSize();
+    ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(
+        SlotSize, -(int64_t)SlotSize, false);
+    FuncInfo->setRAIndex(ReturnAddrIndex);
+  }
+
+  return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
+}
+
+/// Load the current return address into \p OutRetAddr for a tail call.
+/// Returns the chain (value #1 of the load) to sequence later stores.
+SDValue M68kTargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
+                                                    SDValue &OutRetAddr,
+                                                    SDValue Chain,
+                                                    bool IsTailCall, int FPDiff,
+                                                    const SDLoc &DL) const {
+  EVT VT = getPointerTy(DAG.getDataLayout());
+  OutRetAddr = getReturnAddressFrameIndex(DAG);
+
+  // Load the "old" Return address.
+  OutRetAddr = DAG.getLoad(VT, DL, Chain, OutRetAddr, MachinePointerInfo());
+  return SDValue(OutRetAddr.getNode(), 1);
+}
+
+/// Store the (previously loaded) return address \p RetFI into its relocated
+/// slot when the stack frame is being resized for a tail call. A zero
+/// \p FPDiff means the slot does not move, so nothing is emitted.
+SDValue M68kTargetLowering::EmitTailCallStoreRetAddr(
+    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetFI,
+    EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &DL) const {
+  if (!FPDiff)
+    return Chain;
+
+  // Calculate the new stack slot for the return address.
+  int NewFO = MF.getFrameInfo().CreateFixedObject(
+      SlotSize, (int64_t)FPDiff - SlotSize, false);
+
+  SDValue NewFI = DAG.getFrameIndex(NewFO, PtrVT);
+  // Store the return address to the appropriate stack slot.
+  Chain = DAG.getStore(
+      Chain, DL, RetFI, NewFI,
+      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), NewFO));
+  return Chain;
+}
+
+/// Lower one incoming argument that is passed in memory: create the fixed
+/// stack object for its slot and either return the frame index (byval) or
+/// emit a load from it.
+SDValue
+M68kTargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
+                                     const SmallVectorImpl<ISD::InputArg> &Ins,
+                                     const SDLoc &DL, SelectionDAG &DAG,
+                                     const CCValAssign &VA,
+                                     MachineFrameInfo &MFI,
+                                     unsigned ArgIdx) const {
+  // Create the nodes corresponding to a load from this parameter slot.
+  ISD::ArgFlagsTy Flags = Ins[ArgIdx].Flags;
+  EVT ValVT;
+
+  // If value is passed by pointer we have address passed instead of the value
+  // itself.
+  if (VA.getLocInfo() == CCValAssign::Indirect)
+    ValVT = VA.getLocVT();
+  else
+    ValVT = VA.getValVT();
+
+  // Because we are dealing with BE architecture we need to offset loading of
+  // partial types
+  int Offset = VA.getLocMemOffset();
+  if (VA.getValVT() == MVT::i8) {
+    Offset += 3;
+  } else if (VA.getValVT() == MVT::i16) {
+    Offset += 2;
+  }
+
+  // TODO Interrupt handlers
+  // Calculate SP offset of interrupt parameter, re-arrange the slot normally
+  // taken by a return address.
+
+  // FIXME For now, all byval parameter objects are marked mutable. This can
+  // be changed with more analysis. In case of tail call optimization mark all
+  // arguments mutable. Since they could be overwritten by lowering of arguments
+  // in case of a tail call.
+  bool AlwaysUseMutable = shouldGuaranteeTCO(
+      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
+  bool IsImmutable = !AlwaysUseMutable && !Flags.isByVal();
+
+  if (Flags.isByVal()) {
+    unsigned Bytes = Flags.getByValSize();
+    if (Bytes == 0)
+      Bytes = 1; // Don't create zero-sized stack objects.
+    int FI = MFI.CreateFixedObject(Bytes, Offset, IsImmutable);
+    // TODO Interrupt handlers
+    // Adjust SP offset of interrupt parameter.
+    // byval arguments are used in place: hand back the address, not a load.
+    return DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+  } else {
+    int FI =
+        MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, Offset, IsImmutable);
+
+    // Set SExt or ZExt flag.
+    if (VA.getLocInfo() == CCValAssign::ZExt) {
+      MFI.setObjectZExt(FI, true);
+    } else if (VA.getLocInfo() == CCValAssign::SExt) {
+      MFI.setObjectSExt(FI, true);
+    }
+
+    // TODO Interrupt handlers
+    // Adjust SP offset of interrupt parameter.
+
+    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+    SDValue Val = DAG.getLoad(
+        ValVT, DL, Chain, FIN,
+        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
+    // If the value was extended to fill the location, truncate it back down.
+    return VA.isExtInLoc() ? DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val)
+                           : Val;
+  }
+}
+
+/// Lower one outgoing call argument that is passed in memory: compute
+/// StackPtr + LocMemOffset and either memcpy the aggregate there (byval)
+/// or emit a store of the value.
+SDValue M68kTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
+                                             SDValue Arg, const SDLoc &DL,
+                                             SelectionDAG &DAG,
+                                             const CCValAssign &VA,
+                                             ISD::ArgFlagsTy Flags) const {
+  unsigned LocMemOffset = VA.getLocMemOffset();
+  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, DL);
+  PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
+                       StackPtr, PtrOff);
+  if (Flags.isByVal())
+    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, DL);
+
+  return DAG.getStore(
+      Chain, DL, Arg, PtrOff,
+      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
+}
+
+//===----------------------------------------------------------------------===//
+//                                   Call
+//===----------------------------------------------------------------------===//
+
+SDValue M68kTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+                                      SmallVectorImpl<SDValue> &InVals) const {
+  SelectionDAG &DAG = CLI.DAG;
+  SDLoc &DL = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+  SDValue Chain = CLI.Chain;
+  SDValue Callee = CLI.Callee;
+  CallingConv::ID CallConv = CLI.CallConv;
+  bool &IsTailCall = CLI.IsTailCall;
+  bool IsVarArg = CLI.IsVarArg;
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  StructReturnType SR = callIsStructReturn(Outs);
+  bool IsSibcall = false;
+  M68kMachineFunctionInfo *MFI = MF.getInfo<M68kMachineFunctionInfo>();
+  // const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
+
+  if (CallConv == CallingConv::M68k_INTR)
+    report_fatal_error("M68k interrupts may not be called directly");
+
+  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
+  if (Attr.getValueAsString() == "true")
+    IsTailCall = false;
+
+  // FIXME Add tailcalls support
+
+  bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
+  if (IsMustTail) {
+    // Force this to be a tail call.  The verifier rules are enough to ensure
+    // that we can lower this successfully without moving the return address
+    // around.
+    IsTailCall = true;
+  } else if (IsTailCall) {
+    // Check if it's really possible to do a tail call.
+    IsTailCall = IsEligibleForTailCallOptimization(
+        Callee, CallConv, IsVarArg, SR != NotStructReturn,
+        MF.getFunction().hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins,
+        DAG);
+
+    // Sibcalls are automatically detected tailcalls which do not require
+    // ABI changes.
+    if (!MF.getTarget().Options.GuaranteedTailCallOpt && IsTailCall)
+      IsSibcall = true;
+
+    if (IsTailCall)
+      ++NumTailCalls;
+  }
+
+  assert(!(IsVarArg && canGuaranteeTCO(CallConv)) &&
+         "Var args not supported with calling convention fastcc");
+
+  // Analyze operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  // It is empty for LibCall
+  const Function *CalleeFunc = CLI.CB ? CLI.CB->getCalledFunction() : nullptr;
+  M68kCCState CCInfo(*CalleeFunc, CallConv, IsVarArg, MF, ArgLocs,
+                     *DAG.getContext());
+  CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
+
+  // Get a count of how many bytes are to be pushed on the stack.
+  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
+  if (IsSibcall) {
+    // This is a sibcall. The memory operands are available in caller's
+    // own caller's stack.
+    NumBytes = 0;
+  } else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
+             canGuaranteeTCO(CallConv)) {
+    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
+  }
+
+  int FPDiff = 0;
+  if (IsTailCall && !IsSibcall && !IsMustTail) {
+    // Lower arguments at fp - stackoffset + fpdiff.
+    unsigned NumBytesCallerPushed = MFI->getBytesToPopOnReturn();
+
+    FPDiff = NumBytesCallerPushed - NumBytes;
+
+    // Set the delta of movement of the returnaddr stackslot.
+    // But only set if delta is greater than previous delta.
+    if (FPDiff < MFI->getTCReturnAddrDelta())
+      MFI->setTCReturnAddrDelta(FPDiff);
+  }
+
+  unsigned NumBytesToPush = NumBytes;
+  unsigned NumBytesToPop = NumBytes;
+
+  // If we have an inalloca argument, all stack space has already been allocated
+  // for us and be right at the top of the stack.  We don't support multiple
+  // arguments passed in memory when using inalloca.
+  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
+    NumBytesToPush = 0;
+    if (!ArgLocs.back().isMemLoc())
+      report_fatal_error("cannot use inalloca attribute on a register "
+                         "parameter");
+    if (ArgLocs.back().getLocMemOffset() != 0)
+      report_fatal_error("any parameter with the inalloca attribute must be "
+                         "the only memory argument");
+  }
+
+  if (!IsSibcall)
+    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
+                                 NumBytes - NumBytesToPush, DL);
+
+  SDValue RetFI;
+  // Load return address for tail calls.
+  if (IsTailCall && FPDiff)
+    Chain = EmitTailCallLoadRetAddr(DAG, RetFI, Chain, IsTailCall, FPDiff, DL);
+
+  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+  SmallVector<SDValue, 8> MemOpChains;
+  SDValue StackPtr;
+
+  // Walk the register/memloc assignments, inserting copies/loads.  In the case
+  // of tail call optimization arguments are handle later.
+  const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    ISD::ArgFlagsTy Flags = Outs[i].Flags;
+
+    // Skip inalloca arguments, they have already been written.
+    if (Flags.isInAlloca())
+      continue;
+
+    CCValAssign &VA = ArgLocs[i];
+    EVT RegVT = VA.getLocVT();
+    SDValue Arg = OutVals[i];
+    bool IsByVal = Flags.isByVal();
+
+    // Promote the value if needed.
+    switch (VA.getLocInfo()) {
+    default:
+      llvm_unreachable("Unknown loc info!");
+    case CCValAssign::Full:
+      break;
+    case CCValAssign::SExt:
+      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
+      break;
+    case CCValAssign::ZExt:
+      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
+      break;
+    case CCValAssign::AExt:
+      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
+      break;
+    case CCValAssign::BCvt:
+      Arg = DAG.getBitcast(RegVT, Arg);
+      break;
+    case CCValAssign::Indirect: {
+      // Store the argument.
+      SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
+      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
+      Chain = DAG.getStore(
+          Chain, DL, Arg, SpillSlot,
+          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
+      Arg = SpillSlot;
+      break;
+    }
+    }
+
+    if (VA.isRegLoc()) {
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+    } else if (!IsSibcall && (!IsTailCall || IsByVal)) {
+      assert(VA.isMemLoc());
+      if (!StackPtr.getNode()) {
+        StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
+                                      getPointerTy(DAG.getDataLayout()));
+      }
+      MemOpChains.push_back(
+          LowerMemOpCallTo(Chain, StackPtr, Arg, DL, DAG, VA, Flags));
+    }
+  }
+
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+  // FIXME Make sure PIC style GOT works as expected
+  // The only time GOT is really needed is for Medium-PIC static data
+  // otherwise we are happy with pc-rel or static references
+
+  if (IsVarArg && IsMustTail) {
+    const auto &Forwards = MFI->getForwardedMustTailRegParms();
+    for (const auto &F : Forwards) {
+      SDValue Val = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
+      RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
+    }
+  }
+
+  // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
+  // don't need this because the eligibility check rejects calls that require
+  // shuffling arguments passed in memory.
+  if (!IsSibcall && IsTailCall) {
+    // Force all the incoming stack arguments to be loaded from the stack
+    // before any new outgoing arguments are stored to the stack, because the
+    // outgoing stack slots may alias the incoming argument stack slots, and
+    // the alias isn't otherwise explicit. This is slightly more conservative
+    // than necessary, because it means that each store effectively depends
+    // on every argument instead of just those arguments it would clobber.
+    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
+
+    SmallVector<SDValue, 8> MemOpChains2;
+    SDValue FIN;
+    int FI = 0;
+    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+      CCValAssign &VA = ArgLocs[i];
+      if (VA.isRegLoc())
+        continue;
+      assert(VA.isMemLoc());
+      SDValue Arg = OutVals[i];
+      ISD::ArgFlagsTy Flags = Outs[i].Flags;
+      // Skip inalloca arguments.  They don't require any work.
+      if (Flags.isInAlloca())
+        continue;
+      // Create frame index.
+      int32_t Offset = VA.getLocMemOffset() + FPDiff;
+      uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
+      FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
+      FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+
+      if (Flags.isByVal()) {
+        // Copy relative to framepointer.
+        SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), DL);
+        if (!StackPtr.getNode()) {
+          StackPtr = DAG.getCopyFromReg(Chain, DL, RegInfo->getStackRegister(),
+                                        getPointerTy(DAG.getDataLayout()));
+        }
+        Source = DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
+                             StackPtr, Source);
+
+        MemOpChains2.push_back(
+            CreateCopyOfByValArgument(Source, FIN, ArgChain, Flags, DAG, DL));
+      } else {
+        // Store relative to framepointer.
+        MemOpChains2.push_back(DAG.getStore(
+            ArgChain, DL, Arg, FIN,
+            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
+      }
+    }
+
+    if (!MemOpChains2.empty())
+      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains2);
+
+    // Store the return address to the appropriate stack slot.
+    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetFI,
+                                     getPointerTy(DAG.getDataLayout()),
+                                     Subtarget.getSlotSize(), FPDiff, DL);
+  }
+
+  // Build a sequence of copy-to-reg nodes chained together with token chain
+  // and flag operands which copy the outgoing args into registers.
+  SDValue InFlag;
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+    Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[i].first,
+                             RegsToPass[i].second, InFlag);
+    InFlag = Chain.getValue(1);
+  }
+
+  if (Callee->getOpcode() == ISD::GlobalAddress) {
+    // If the callee is a GlobalAddress node (quite common, every direct call
+    // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
+    // it.
+    GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
+
+    // We should use extra load for direct calls to dllimported functions in
+    // non-JIT mode.
+    const GlobalValue *GV = G->getGlobal();
+    if (!GV->hasDLLImportStorageClass()) {
+      unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
+
+      Callee = DAG.getTargetGlobalAddress(
+          GV, DL, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
+
+      if (OpFlags == M68kII::MO_GOTPCREL) {
+
+        // Add a wrapper.
+        Callee = DAG.getNode(M68kISD::WrapperPC, DL,
+                             getPointerTy(DAG.getDataLayout()), Callee);
+
+        // Add extra indirection
+        Callee = DAG.getLoad(
+            getPointerTy(DAG.getDataLayout()), DL, DAG.getEntryNode(), Callee,
+            MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+      }
+    }
+  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
+    unsigned char OpFlags =
+        Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
+
+    Callee = DAG.getTargetExternalSymbol(
+        S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
+  }
+
+  // Returns a chain & a flag for retval copy to use.
+  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+  SmallVector<SDValue, 8> Ops;
+
+  if (!IsSibcall && IsTailCall) {
+    Chain = DAG.getCALLSEQ_END(Chain,
+                               DAG.getIntPtrConstant(NumBytesToPop, DL, true),
+                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
+    InFlag = Chain.getValue(1);
+  }
+
+  Ops.push_back(Chain);
+  Ops.push_back(Callee);
+
+  if (IsTailCall)
+    Ops.push_back(DAG.getConstant(FPDiff, DL, MVT::i32));
+
+  // Add argument registers to the end of the list so that they are known live
+  // into the call.
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+                                  RegsToPass[i].second.getValueType()));
+
+  // Add a register mask operand representing the call-preserved registers.
+  const uint32_t *Mask = RegInfo->getCallPreservedMask(MF, CallConv);
+  assert(Mask && "Missing call preserved mask for calling convention");
+
+  Ops.push_back(DAG.getRegisterMask(Mask));
+
+  if (InFlag.getNode())
+    Ops.push_back(InFlag);
+
+  if (IsTailCall) {
+    MF.getFrameInfo().setHasTailCall();
+    return DAG.getNode(M68kISD::TC_RETURN, DL, NodeTys, Ops);
+  }
+
+  Chain = DAG.getNode(M68kISD::CALL, DL, NodeTys, Ops);
+  InFlag = Chain.getValue(1);
+
+  // Create the CALLSEQ_END node.
+  unsigned NumBytesForCalleeToPop;
+  if (M68k::isCalleePop(CallConv, IsVarArg,
+                        DAG.getTarget().Options.GuaranteedTailCallOpt)) {
+    NumBytesForCalleeToPop = NumBytes; // Callee pops everything
+  } else if (!canGuaranteeTCO(CallConv) && SR == StackStructReturn) {
+    // If this is a call to a struct-return function, the callee
+    // pops the hidden struct pointer, so we have to push it back.
+    NumBytesForCalleeToPop = 4;
+  } else {
+    NumBytesForCalleeToPop = 0; // Callee pops nothing.
+  }
+
+  if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
+    // No need to reset the stack after the call if the call doesn't return. To
+    // make the MI verify, we'll pretend the callee does it for us.
+    NumBytesForCalleeToPop = NumBytes;
+  }
+
+  // Returns a flag for retval copy to use.
+  if (!IsSibcall) {
+    Chain = DAG.getCALLSEQ_END(
+        Chain, DAG.getIntPtrConstant(NumBytesToPop, DL, true),
+        DAG.getIntPtrConstant(NumBytesForCalleeToPop, DL, true), InFlag, DL);
+    InFlag = Chain.getValue(1);
+  }
+
+  // Handle result values, copying them out of physregs into vregs that we
+  // return.
+  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
+                         InVals);
+}
+
/// Lower the result values of a call into the appropriate copies out of
/// physical registers, as specified by RetCC_M68k, appending one SDValue per
/// returned value to \p InVals. Returns the updated token chain.
SDValue M68kTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_M68k);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    EVT CopyVT = VA.getLocVT();

    // A glued CopyFromReg node has three results: 0 = value, 1 = chain,
    // 2 = out-glue. We keep the chain (result 1) in Chain, then address the
    // sibling results of the same node via getValue(0)/getValue(2) below.
    Chain = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), CopyVT, InFlag)
                .getValue(1);
    SDValue Val = Chain.getValue(0);

    // i1 results come back extended in the location register; truncate to
    // the original value type.
    if (VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1)
      Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);

    // Thread the glue so successive CopyFromReg nodes stay adjacent.
    InFlag = Chain.getValue(2);
    InVals.push_back(Val);
  }

  return Chain;
}
+
+//===----------------------------------------------------------------------===//
+//            Formal Arguments Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+SDValue M68kTargetLowering::LowerFormalArguments(
+    SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
+    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  M68kMachineFunctionInfo *MMFI = MF.getInfo<M68kMachineFunctionInfo>();
+  // const TargetFrameLowering &TFL = *Subtarget.getFrameLowering();
+
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // Assign locations to all of the incoming arguments.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  M68kCCState CCInfo(MF.getFunction(), CCID, IsVarArg, MF, ArgLocs,
+                     *DAG.getContext());
+
+  CCInfo.AnalyzeFormalArguments(Ins, CC_M68k);
+
+  unsigned LastVal = ~0U;
+  SDValue ArgValue;
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    CCValAssign &VA = ArgLocs[i];
+    assert(VA.getValNo() != LastVal && "Same value in 
diff erent locations");
+
+    LastVal = VA.getValNo();
+
+    if (VA.isRegLoc()) {
+      EVT RegVT = VA.getLocVT();
+      const TargetRegisterClass *RC;
+      if (RegVT == MVT::i32)
+        RC = &M68k::XR32RegClass;
+      else
+        llvm_unreachable("Unknown argument type!");
+
+      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+      ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
+
+      // If this is an 8 or 16-bit value, it is really passed promoted to 32
+      // bits.  Insert an assert[sz]ext to capture this, then truncate to the
+      // right size.
+      if (VA.getLocInfo() == CCValAssign::SExt) {
+        ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
+                               DAG.getValueType(VA.getValVT()));
+      } else if (VA.getLocInfo() == CCValAssign::ZExt) {
+        ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
+                               DAG.getValueType(VA.getValVT()));
+      } else if (VA.getLocInfo() == CCValAssign::BCvt) {
+        ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
+      }
+
+      if (VA.isExtInLoc()) {
+        ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);
+      }
+    } else {
+      assert(VA.isMemLoc());
+      ArgValue = LowerMemArgument(Chain, CCID, Ins, DL, DAG, VA, MFI, i);
+    }
+
+    // If value is passed via pointer - do a load.
+    // TODO Make sure this handling on indirect arguments is correct
+    if (VA.getLocInfo() == CCValAssign::Indirect)
+      ArgValue =
+          DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue, MachinePointerInfo());
+
+    InVals.push_back(ArgValue);
+  }
+
+  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+    // Swift calling convention does not require we copy the sret argument
+    // into %D0 for the return. We don't set SRetReturnReg for Swift.
+    if (CCID == CallingConv::Swift)
+      continue;
+
+    // ABI require that for returning structs by value we copy the sret argument
+    // into %D0 for the return. Save the argument into a virtual register so
+    // that we can access it from the return points.
+    if (Ins[i].Flags.isSRet()) {
+      unsigned Reg = MMFI->getSRetReturnReg();
+      if (!Reg) {
+        MVT PtrTy = getPointerTy(DAG.getDataLayout());
+        Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
+        MMFI->setSRetReturnReg(Reg);
+      }
+      SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
+      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
+      break;
+    }
+  }
+
+  unsigned StackSize = CCInfo.getNextStackOffset();
+  // Align stack specially for tail calls.
+  if (shouldGuaranteeTCO(CCID, MF.getTarget().Options.GuaranteedTailCallOpt))
+    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
+
+  // If the function takes variable number of arguments, make a frame index for
+  // the start of the first vararg value... for expansion of llvm.va_start. We
+  // can skip this if there are no va_start calls.
+  if (MFI.hasVAStart()) {
+    MMFI->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
+  }
+
+  if (IsVarArg && MFI.hasMustTailInVarArgFunc()) {
+    // We forward some GPRs and some vector types.
+    SmallVector<MVT, 2> RegParmTypes;
+    MVT IntVT = MVT::i32;
+    RegParmTypes.push_back(IntVT);
+
+    // Compute the set of forwarded registers. The rest are scratch.
+    // ??? what is this for?
+    SmallVectorImpl<ForwardedRegister> &Forwards =
+        MMFI->getForwardedMustTailRegParms();
+    CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_M68k);
+
+    // Copy all forwards from physical to virtual registers.
+    for (ForwardedRegister &F : Forwards) {
+      // FIXME Can we use a less constrained schedule?
+      SDValue RegVal = DAG.getCopyFromReg(Chain, DL, F.VReg, F.VT);
+      F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
+      Chain = DAG.getCopyToReg(Chain, DL, F.VReg, RegVal);
+    }
+  }
+
+  // Some CCs need callee pop.
+  if (M68k::isCalleePop(CCID, IsVarArg,
+                        MF.getTarget().Options.GuaranteedTailCallOpt)) {
+    MMFI->setBytesToPopOnReturn(StackSize); // Callee pops everything.
+  } else {
+    MMFI->setBytesToPopOnReturn(0); // Callee pops nothing.
+    // If this is an sret function, the return should pop the hidden pointer.
+    if (!canGuaranteeTCO(CCID) && argsAreStructReturn(Ins) == StackStructReturn)
+      MMFI->setBytesToPopOnReturn(4);
+  }
+
+  MMFI->setArgumentStackSize(StackSize);
+
+  return Chain;
+}
+
+//===----------------------------------------------------------------------===//
+//              Return Value Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
/// Lower the return of a function: copy each return value into its assigned
/// physical register (per RetCC_M68k), handle the sret hidden-pointer copy
/// into %D0, and emit the M68kISD::RET node carrying the bytes-to-pop amount.
SDValue
M68kTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CCID,
                                bool IsVarArg,
                                const SmallVectorImpl<ISD::OutputArg> &Outs,
                                const SmallVectorImpl<SDValue> &OutVals,
                                const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  M68kMachineFunctionInfo *MFI = MF.getInfo<M68kMachineFunctionInfo>();

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CCID, IsVarArg, MF, RVLocs, *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC_M68k);

  SDValue Flag;
  SmallVector<SDValue, 6> RetOps;
  // Operand #0 = Chain (updated below)
  RetOps.push_back(Chain);
  // Operand #1 = Bytes To Pop
  RetOps.push_back(
      DAG.getTargetConstant(MFI->getBytesToPopOnReturn(), DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue ValToCopy = OutVals[i];
    EVT ValVT = ValToCopy.getValueType();

    // Promote values to the appropriate types.
    if (VA.getLocInfo() == CCValAssign::SExt)
      ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::ZExt)
      ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), ValToCopy);
    else if (VA.getLocInfo() == CCValAssign::AExt) {
      // i1 vector elements are sign-extended so each lane becomes all-ones
      // or all-zeros; everything else may any-extend.
      if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
        ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), ValToCopy);
      else
        ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), ValToCopy);
    } else if (VA.getLocInfo() == CCValAssign::BCvt)
      ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);

    // Glue the copies together so they are emitted adjacent to the return.
    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), ValToCopy, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Swift calling convention does not require we copy the sret argument
  // into %d0 for the return, and SRetReturnReg is not set for Swift.

  // ABI require that for returning structs by value we copy the sret argument
  // into %D0 for the return. Save the argument into a virtual register so that
  // we can access it from the return points.
  //
  // Checking Function.hasStructRetAttr() here is insufficient because the IR
  // may not have an explicit sret argument. If MFI.CanLowerReturn is
  // false, then an sret argument may be implicitly inserted in the SelDAG. In
  // either case MFI->setSRetReturnReg() will have been called.
  if (unsigned SRetReg = MFI->getSRetReturnReg()) {
    // When we have both sret and another return value, we should use the
    // original Chain stored in RetOps[0], instead of the current Chain updated
    // in the above loop. If we only have sret, RetOps[0] equals to Chain.

    // For the case of sret and another return value, we have
    //   Chain_0 at the function entry
    //   Chain_1 = getCopyToReg(Chain_0) in the above loop
    // If we use Chain_1 in getCopyFromReg, we will have
    //   Val = getCopyFromReg(Chain_1)
    //   Chain_2 = getCopyToReg(Chain_1, Val) from below

    // getCopyToReg(Chain_0) will be glued together with
    // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
    // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
    //   Data dependency from Unit B to Unit A due to usage of Val in
    //     getCopyToReg(Chain_1, Val)
    //   Chain dependency from Unit A to Unit B

    // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
    SDValue Val = DAG.getCopyFromReg(RetOps[0], DL, SRetReg,
                                     getPointerTy(MF.getDataLayout()));

    // NOTE(review): assumes the sret pointer is always returned in %D0 and
    // that at most one sret argument exists - confirm for CCs that pass
    // arguments differently or for multiple struct returns.
    unsigned RetValReg = M68k::D0;
    Chain = DAG.getCopyToReg(Chain, DL, RetValReg, Val, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(
        DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(M68kISD::RET, DL, MVT::Other, RetOps);
}
+
+//===----------------------------------------------------------------------===//
+//                Fast Calling Convention (tail call) implementation
+//===----------------------------------------------------------------------===//
+
//  Like a standard callee-cleanup convention, except that the register used
//  to hold the tail-called function's address is reserved (the call address
//  may only target %A0 or %A1 - see IsEligibleForTailCallOptimization), so
//  only two registers remain free for 'inreg' argument passing. Tail call
//  optimization is performed provided:
//                * tailcallopt is enabled
//                * caller/callee are fastcc
//  With GOT-style position-independent code only local (within module) calls
//  are supported at the moment. To keep the stack aligned according to the
//  platform ABI the function GetAlignedArgumentStackSize ensures that the
//  argument delta is always a multiple of the stack alignment. (Dynamic
//  linkers need this - darwin's dyld for example.) If a tail-called callee
//  has more arguments than the caller, the caller needs to make sure that
//  there is room to move the RETADDR to. This is achieved by reserving an
//  area the size of the argument delta right after the original RETADDR, but
//  before the saved frame pointer or the spilled registers, e.g. for
//  caller(arg1, arg2) calling callee(arg1, arg2, arg3, arg4) the stack
//  layout is:
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible saved FP)
//    callee-saved registers
//    local1 ..
+
+/// Make the stack size align e.g 16n + 12 aligned for a 16-byte align
+/// requirement.
+unsigned
+M68kTargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
+                                                SelectionDAG &DAG) const {
+  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
+  unsigned StackAlignment = TFI.getStackAlignment();
+  uint64_t AlignMask = StackAlignment - 1;
+  int64_t Offset = StackSize;
+  unsigned SlotSize = Subtarget.getSlotSize();
+  if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
+    // Number smaller than 12 so just add the 
diff erence.
+    Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
+  } else {
+    // Mask out lower bits, add stackalignment once plus the 12 bytes.
+    Offset =
+        ((~AlignMask) & Offset) + StackAlignment + (StackAlignment - SlotSize);
+  }
+  return Offset;
+}
+
/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
/// Runs a series of ABI checks; returns false at the first condition that
/// would make a tail call unsound, true only if all checks pass.
bool M68kTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
    bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  // If -tailcallopt is specified, make fastcc functions tail-callable.
  MachineFunction &MF = DAG.getMachineFunction();
  const auto &CallerF = MF.getFunction();

  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Under GuaranteedTailCallOpt, eligibility is decided purely by the CC:
  // it must be TCO-capable and match between caller and callee.
  if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
  // emit a special epilogue.
  const M68kRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->needsStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (IsCalleeStructRet || IsCallerStructRet)
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  LLVMContext &C = *DAG.getContext();
  if (IsVarArg && !Outs.empty()) {

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, RetCC_M68k,
                                  RetCC_M68k))
    return false;

  // The callee has to preserve all registers the caller needs to preserve.
  const M68kRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Bytes the callee's arguments occupy on the stack; used below to match
  // against the caller's bytes-to-pop-on-return.
  unsigned StackArgsSize = 0;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CC_M68k);
    StackArgsSize = CCInfo.getNextStackOffset();

    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const M68kInstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        SDValue Arg = OutVals[i];
        ISD::ArgFlagsTy Flags = Outs[i].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, MFI, MRI,
                                   TII, VA))
            return false;
        }
      }
    }

    bool PositionIndependent = isPositionIndependent();
    // If the tailcall address may be in a register, then make sure it's
    // possible to register allocate for it. The call address can
    // only target %A0 or %A1 since the tail call must be scheduled after
    // callee-saved registers are restored. These happen to be the same
    // registers used to pass 'inreg' arguments so watch out for those.
    if ((!isa<GlobalAddressSDNode>(Callee) &&
         !isa<ExternalSymbolSDNode>(Callee)) ||
        PositionIndependent) {
      unsigned NumInRegs = 0;
      // In PIC we need an extra register to formulate the address computation
      // for the callee.
      unsigned MaxInRegs = PositionIndependent ? 1 : 2;

      for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        CCValAssign &VA = ArgLocs[i];
        if (!VA.isRegLoc())
          continue;
        unsigned Reg = VA.getLocReg();
        switch (Reg) {
        default:
          break;
        case M68k::A0:
        case M68k::A1:
          if (++NumInRegs == MaxInRegs)
            return false;
          break;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  bool CalleeWillPop = M68k::isCalleePop(
      CalleeCC, IsVarArg, MF.getTarget().Options.GuaranteedTailCallOpt);

  if (unsigned BytesToPop =
          MF.getInfo<M68kMachineFunctionInfo>()->getBytesToPopOnReturn()) {
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}
+
+//===----------------------------------------------------------------------===//
+// Custom Lower
+//===----------------------------------------------------------------------===//
+
/// Dispatch nodes this target marked as 'Custom' to the matching Lower*
/// routine. Any opcode reaching the default case is a legalization bug.
SDValue M68kTargetLowering::LowerOperation(SDValue Op,
                                           SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Should not custom lower this!");
  // Arithmetic-with-overflow nodes all funnel into LowerXALUO.
  // NOTE(review): SMULO/UMULO are routed to LowerXALUO below, but its switch
  // only handles SADDO/UADDO/SSUBO/USUBO and would hit llvm_unreachable for
  // the multiply variants - confirm these opcodes are never marked Custom.
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO:
    return LowerXALUO(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::SETCCCARRY:
    return LowerSETCCCARRY(Op, DAG);
  case ISD::SELECT:
    return LowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return LowerBRCOND(Op, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  // Address-like nodes (constant pool, globals, symbols, jump tables).
  case ISD::ConstantPool:
    return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
}
+
+bool M68kTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
+                                                SDValue C) const {
+  // Shifts and add instructions in M68000 and M68010 support
+  // up to 32 bits, but mul only has 16-bit variant. So it's almost
+  // certainly beneficial to lower 8/16/32-bit mul to their
+  // add / shifts counterparts. But for 64-bits mul, it might be
+  // safer to just leave it to compiler runtime implementations.
+  return VT.bitsLE(MVT::i32) || Subtarget.atLeastM68020();
+}
+
+SDValue M68kTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
+  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
+  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
+  // looks for this combo and may remove the "setcc" instruction if the "setcc"
+  // has only one use.
+  SDNode *N = Op.getNode();
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+  unsigned BaseOp = 0;
+  unsigned Cond = 0;
+  SDLoc DL(Op);
+  switch (Op.getOpcode()) {
+  default:
+    llvm_unreachable("Unknown ovf instruction!");
+  case ISD::SADDO:
+    BaseOp = M68kISD::ADD;
+    Cond = M68k::COND_VS;
+    break;
+  case ISD::UADDO:
+    BaseOp = M68kISD::ADD;
+    Cond = M68k::COND_CS;
+    break;
+  case ISD::SSUBO:
+    BaseOp = M68kISD::SUB;
+    Cond = M68k::COND_VS;
+    break;
+  case ISD::USUBO:
+    BaseOp = M68kISD::SUB;
+    Cond = M68k::COND_CS;
+    break;
+  }
+
+  // Also sets CCR.
+  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i8);
+  SDValue Arith = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
+  SDValue SetCC = DAG.getNode(M68kISD::SETCC, DL, N->getValueType(1),
+                              DAG.getConstant(Cond, DL, MVT::i8),
+                              SDValue(Arith.getNode(), 1));
+
+  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Arith, SetCC);
+}
+
+/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
+/// according to equal/not-equal condition code \p CC.
+static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
+                                   const SDLoc &DL, SelectionDAG &DAG) {
+  // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
+  // instruction.  Since the shift amount is in-range-or-undefined, we know
+  // that doing a bittest on the i32 value is ok.
+  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
+    Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
+
+  // If the operand types disagree, extend the shift amount to match.  Since
+  // BT ignores high bits (like shifts) we can use anyextend.
+  if (Src.getValueType() != BitNo.getValueType())
+    BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
+
+  SDValue BT = DAG.getNode(M68kISD::BT, DL, MVT::i32, Src, BitNo);
+
+  // NOTE BTST sets CCR.Z flag
+  M68k::CondCode Cond = CC == ISD::SETEQ ? M68k::COND_NE : M68k::COND_EQ;
+  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
+                     DAG.getConstant(Cond, DL, MVT::i8), BT);
+}
+
/// Result of 'and' is compared against zero. Change to a BT node if possible.
/// Recognizes two shapes: (and X, (shl 1, N)) and (and (srl X, N), 1), plus
/// an and with a large power-of-two immediate; returns an empty SDValue when
/// no pattern matches.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL,
                            SelectionDAG &DAG) {
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  // Look through truncates on either operand of the and.
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  // LHS = value to test, RHS = bit number; both stay null unless a pattern
  // below matches.
  SDValue LHS, RHS;
  // Canonicalize a shl into Op0 so one check below suffices.
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    // Pattern: (and X, (shl 1, N)) -> test bit N of X.
    if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // known zeros.
      unsigned BitWidth = Op0.getValueSizeInBits();
      unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        auto Known = DAG.computeKnownBits(Op0);
        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
          return SDValue();
      }
      LHS = Op1;
      RHS = Op0.getOperand(1);
    }
  } else if (auto *AndRHS = dyn_cast<ConstantSDNode>(Op1)) {
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    // Pattern: (and (srl X, N), 1) -> test bit N of X.
    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    }

    // Use BT if the immediate can't be encoded in a TEST instruction.
    if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
      LHS = AndLHS;
      RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), DL, LHS.getValueType());
    }
  }

  if (LHS.getNode())
    return getBitTestCondition(LHS, RHS, CC, DL, DAG);

  return SDValue();
}
+
+static M68k::CondCode TranslateIntegerM68kCC(ISD::CondCode SetCCOpcode) {
+  switch (SetCCOpcode) {
+  default:
+    llvm_unreachable("Invalid integer condition!");
+  case ISD::SETEQ:
+    return M68k::COND_EQ;
+  case ISD::SETGT:
+    return M68k::COND_GT;
+  case ISD::SETGE:
+    return M68k::COND_GE;
+  case ISD::SETLT:
+    return M68k::COND_LT;
+  case ISD::SETLE:
+    return M68k::COND_LE;
+  case ISD::SETNE:
+    return M68k::COND_NE;
+  case ISD::SETULT:
+    return M68k::COND_CS;
+  case ISD::SETUGE:
+    return M68k::COND_CC;
+  case ISD::SETUGT:
+    return M68k::COND_HI;
+  case ISD::SETULE:
+    return M68k::COND_LS;
+  }
+}
+
+/// Do a one-to-one translation of a ISD::CondCode to the M68k-specific
+/// condition code, returning the condition code and the LHS/RHS of the
+/// comparison to make.
+/// \param SetCCOpcode ISD condition being lowered.
+/// \param IsFP        true when the comparison operands are floating point.
+/// \param LHS,RHS     comparison operands; may be rewritten or swapped in
+///                    place to match the returned condition.
+static unsigned TranslateM68kCC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
+                                bool IsFP, SDValue &LHS, SDValue &RHS,
+                                SelectionDAG &DAG) {
+  if (!IsFP) {
+    // Integer path: a few compare-against-constant forms can use a cheaper
+    // sign-based condition after rewriting RHS.
+    if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
+      if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
+        // X > -1   -> X == 0, jump !sign.
+        RHS = DAG.getConstant(0, DL, RHS.getValueType());
+        return M68k::COND_PL;
+      }
+      if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
+        // X < 0   -> X == 0, jump on sign.
+        return M68k::COND_MI;
+      }
+      if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
+        // X < 1   -> X <= 0
+        RHS = DAG.getConstant(0, DL, RHS.getValueType());
+        return M68k::COND_LE;
+      }
+    }
+
+    return TranslateIntegerM68kCC(SetCCOpcode);
+  }
+
+  // First determine if it is required or is profitable to flip the operands.
+
+  // If LHS is a foldable load, but RHS is not, flip the condition.
+  if (ISD::isNON_EXTLoad(LHS.getNode()) && !ISD::isNON_EXTLoad(RHS.getNode())) {
+    SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
+    std::swap(LHS, RHS);
+  }
+
+  // These conditions are implemented below by testing the swapped operands,
+  // which is why LHS/RHS are exchanged here.
+  switch (SetCCOpcode) {
+  default:
+    break;
+  case ISD::SETOLT:
+  case ISD::SETOLE:
+  case ISD::SETUGT:
+  case ISD::SETUGE:
+    std::swap(LHS, RHS);
+    break;
+  }
+
+  // On a floating point condition, the flags are set as follows:
+  // ZF  PF  CF   op
+  //  0 | 0 | 0 | X > Y
+  //  0 | 0 | 1 | X < Y
+  //  1 | 0 | 0 | X == Y
+  //  1 | 1 | 1 | unordered
+  // NOTE(review): the flag names in this table follow the x86 code this was
+  // ported from -- confirm the intended mapping onto the M68k CCR bits.
+  switch (SetCCOpcode) {
+  default:
+    llvm_unreachable("Condcode should be pre-legalized away");
+  case ISD::SETUEQ:
+  case ISD::SETEQ:
+    return M68k::COND_EQ;
+  case ISD::SETOLT: // flipped
+  case ISD::SETOGT:
+  case ISD::SETGT:
+    return M68k::COND_HI;
+  case ISD::SETOLE: // flipped
+  case ISD::SETOGE:
+  case ISD::SETGE:
+    return M68k::COND_CC;
+  case ISD::SETUGT: // flipped
+  case ISD::SETULT:
+  case ISD::SETLT:
+    return M68k::COND_CS;
+  case ISD::SETUGE: // flipped
+  case ISD::SETULE:
+  case ISD::SETLE:
+    return M68k::COND_LS;
+  case ISD::SETONE:
+  case ISD::SETNE:
+    return M68k::COND_NE;
+  case ISD::SETOEQ:
+  case ISD::SETUNE:
+    // No single M68k condition encodes these; callers must handle
+    // COND_INVALID and bail out of the lowering.
+    return M68k::COND_INVALID;
+  }
+}
+
+// Lower (truncate (srl X, N) to i1) into a bit-test node BT(X, N).
+static SDValue LowerTruncateToBT(SDValue Op, ISD::CondCode CC, const SDLoc &DL,
+                                 SelectionDAG &DAG) {
+
+  assert(Op.getOpcode() == ISD::TRUNCATE && Op.getValueType() == MVT::i1 &&
+         "Expected TRUNCATE to i1 node");
+
+  // The truncated value must come from a logical shift right; the shift
+  // amount then becomes the tested bit index.
+  SDValue Shift = Op.getOperand(0);
+  if (Shift.getOpcode() != ISD::SRL)
+    return SDValue();
+
+  return getBitTestCondition(Shift.getOperand(0), Shift.getOperand(1), CC, DL,
+                             DAG);
+}
+
+/// \brief return true if \c Op has a use that doesn't just read flags.
+/// A use only "reads flags" when it is a BRCOND, a SETCC, or the condition
+/// operand (operand 0) of a SELECT; a single-use TRUNCATE in between is
+/// looked through.
+static bool hasNonFlagsUse(SDValue Op) {
+  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
+       ++UI) {
+    SDNode *User = *UI;
+    unsigned UOpNo = UI.getOperandNo();
+    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
+      // Look pass truncate.
+      UOpNo = User->use_begin().getOperandNo();
+      User = *User->use_begin();
+    }
+
+    // Anything other than a branch, setcc, or select-condition consumes the
+    // actual value rather than just the flags it produces.
+    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
+        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
+      return true;
+  }
+  return false;
+}
+
+/// Emit nodes that produce the CCR flags for evaluating \p Op against zero
+/// under condition \p M68kCC. Where possible the flags already produced by
+/// \p Op (or a flag-setting M68k replacement for it) are reused instead of
+/// emitting an explicit compare-with-zero.
+SDValue M68kTargetLowering::EmitTest(SDValue Op, unsigned M68kCC,
+                                     const SDLoc &DL, SelectionDAG &DAG) const {
+
+  // CF and OF aren't always set the way we want. Determine which
+  // of these we need.
+  bool NeedCF = false;
+  bool NeedOF = false;
+  switch (M68kCC) {
+  default:
+    break;
+  case M68k::COND_HI:
+  case M68k::COND_CC:
+  case M68k::COND_CS:
+  case M68k::COND_LS:
+    NeedCF = true;
+    break;
+  case M68k::COND_GT:
+  case M68k::COND_GE:
+  case M68k::COND_LT:
+  case M68k::COND_LE:
+  case M68k::COND_VS:
+  case M68k::COND_VC: {
+    // Check if we really need to set the
+    // Overflow flag. If NoSignedWrap is present
+    // that is not actually needed.
+    switch (Op->getOpcode()) {
+    case ISD::ADD:
+    case ISD::SUB:
+    case ISD::MUL:
+    case ISD::SHL: {
+      if (Op.getNode()->getFlags().hasNoSignedWrap())
+        break;
+      LLVM_FALLTHROUGH;
+    }
+    default:
+      NeedOF = true;
+      break;
+    }
+    break;
+  }
+  }
+  // See if we can use the CCR value from the operand instead of
+  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
+  // we prove that the arithmetic won't overflow, we can't use OF or CF.
+  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
+    // Emit a CMP with 0, which is the TEST pattern.
+    return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
+                       DAG.getConstant(0, DL, Op.getValueType()), Op);
+  }
+  // Opcode == 0 means "no flag-setting replacement node was found"; in that
+  // case we fall through to the explicit CMP-with-zero at the end.
+  unsigned Opcode = 0;
+  unsigned NumOperands = 0;
+
+  // Truncate operations may prevent the merge of the SETCC instruction
+  // and the arithmetic instruction before it. Attempt to truncate the operands
+  // of the arithmetic instruction and use a reduced bit-width instruction.
+  bool NeedTruncation = false;
+  SDValue ArithOp = Op;
+  if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
+    SDValue Arith = Op->getOperand(0);
+    // Both the trunc and the arithmetic op need to have one user each.
+    if (Arith->hasOneUse())
+      switch (Arith.getOpcode()) {
+      default:
+        break;
+      case ISD::ADD:
+      case ISD::SUB:
+      case ISD::AND:
+      case ISD::OR:
+      case ISD::XOR: {
+        NeedTruncation = true;
+        ArithOp = Arith;
+      }
+      }
+  }
+
+  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
+  // which may be the result of a CAST.  We use the variable 'Op', which is the
+  // non-casted variable when we check for possible users.
+  switch (ArithOp.getOpcode()) {
+  case ISD::ADD:
+    Opcode = M68kISD::ADD;
+    NumOperands = 2;
+    break;
+  case ISD::SHL:
+  case ISD::SRL:
+    // If we have a constant logical shift that's only used in a comparison
+    // against zero turn it into an equivalent AND. This allows turning it into
+    // a TEST instruction later.
+    if ((M68kCC == M68k::COND_EQ || M68kCC == M68k::COND_NE) &&
+        Op->hasOneUse() && isa<ConstantSDNode>(Op->getOperand(1)) &&
+        !hasNonFlagsUse(Op)) {
+      EVT VT = Op.getValueType();
+      unsigned BitWidth = VT.getSizeInBits();
+      unsigned ShAmt = Op->getConstantOperandVal(1);
+      if (ShAmt >= BitWidth) // Avoid undefined shifts.
+        break;
+      APInt Mask = ArithOp.getOpcode() == ISD::SRL
+                       ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
+                       : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
+      if (!Mask.isSignedIntN(32)) // Avoid large immediates.
+        break;
+      Op = DAG.getNode(ISD::AND, DL, VT, Op->getOperand(0),
+                       DAG.getConstant(Mask, DL, VT));
+    }
+    break;
+
+  case ISD::AND:
+    // If the primary 'and' result isn't used, don't bother using
+    // M68kISD::AND, because a TEST instruction will be better.
+    if (!hasNonFlagsUse(Op)) {
+      SDValue Op0 = ArithOp->getOperand(0);
+      SDValue Op1 = ArithOp->getOperand(1);
+      EVT VT = ArithOp.getValueType();
+      bool IsAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
+      bool IsLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
+
+      // But if we can combine this into an ANDN operation, then create an AND
+      // now and allow it to be pattern matched into an ANDN.
+      if (/*!Subtarget.hasBMI() ||*/ !IsAndn || !IsLegalAndnType)
+        break;
+    }
+    LLVM_FALLTHROUGH;
+  case ISD::SUB:
+  case ISD::OR:
+  case ISD::XOR:
+    // Due to the ISEL shortcoming noted above, be conservative if this op is
+    // likely to be selected as part of a load-modify-store instruction.
+    for (const auto *U : Op.getNode()->uses())
+      if (U->getOpcode() == ISD::STORE)
+        goto default_case;
+
+    // Otherwise use a regular CCR-setting instruction.
+    switch (ArithOp.getOpcode()) {
+    default:
+      llvm_unreachable("unexpected operator!");
+    case ISD::SUB:
+      Opcode = M68kISD::SUB;
+      break;
+    case ISD::XOR:
+      Opcode = M68kISD::XOR;
+      break;
+    case ISD::AND:
+      Opcode = M68kISD::AND;
+      break;
+    case ISD::OR:
+      Opcode = M68kISD::OR;
+      break;
+    }
+
+    NumOperands = 2;
+    break;
+  case M68kISD::ADD:
+  case M68kISD::SUB:
+  case M68kISD::OR:
+  case M68kISD::XOR:
+  case M68kISD::AND:
+    // Already a flag-producing M68k node; its CCR result is value #1.
+    return SDValue(Op.getNode(), 1);
+  default:
+  default_case:
+    break;
+  }
+
+  // If we found that truncation is beneficial, perform the truncation and
+  // update 'Op'.
+  if (NeedTruncation) {
+    EVT VT = Op.getValueType();
+    SDValue WideVal = Op->getOperand(0);
+    EVT WideVT = WideVal.getValueType();
+    unsigned ConvertedOp = 0;
+    // Use a target machine opcode to prevent further DAGCombine
+    // optimizations that may separate the arithmetic operations
+    // from the setcc node.
+    switch (WideVal.getOpcode()) {
+    default:
+      break;
+    case ISD::ADD:
+      ConvertedOp = M68kISD::ADD;
+      break;
+    case ISD::SUB:
+      ConvertedOp = M68kISD::SUB;
+      break;
+    case ISD::AND:
+      ConvertedOp = M68kISD::AND;
+      break;
+    case ISD::OR:
+      ConvertedOp = M68kISD::OR;
+      break;
+    case ISD::XOR:
+      ConvertedOp = M68kISD::XOR;
+      break;
+    }
+
+    if (ConvertedOp) {
+      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+      if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
+        SDValue V0 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(0));
+        SDValue V1 = DAG.getNode(ISD::TRUNCATE, DL, VT, WideVal.getOperand(1));
+        Op = DAG.getNode(ConvertedOp, DL, VT, V0, V1);
+      }
+    }
+  }
+
+  if (Opcode == 0) {
+    // Emit a CMP with 0, which is the TEST pattern.
+    return DAG.getNode(M68kISD::CMP, DL, MVT::i8,
+                       DAG.getConstant(0, DL, Op.getValueType()), Op);
+  }
+  // Rebuild the arithmetic as a flag-producing M68k node, replace all uses
+  // of the original, and hand back the CCR result (value #1).
+  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i8);
+  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
+
+  SDValue New = DAG.getNode(Opcode, DL, VTs, Ops);
+  DAG.ReplaceAllUsesWith(Op, New);
+  return SDValue(New.getNode(), 1);
+}
+
+/// \brief Return true if the given M68k condition code tests an unsigned
+/// relation (or plain equality); signed relations return false.
+static bool isM68kCCUnsigned(unsigned M68kCC) {
+  switch (M68kCC) {
+  case M68k::COND_GT:
+  case M68k::COND_GE:
+  case M68k::COND_LT:
+  case M68k::COND_LE:
+    return false;
+  case M68k::COND_EQ:
+  case M68k::COND_NE:
+  case M68k::COND_CS:
+  case M68k::COND_HI:
+  case M68k::COND_LS:
+  case M68k::COND_CC:
+    return true;
+  default:
+    llvm_unreachable("Invalid integer condition!");
+  }
+}
+
+/// Emit nodes that produce the CCR flags of comparing \p Op0 with \p Op1
+/// for later evaluation under condition \p M68kCC. Comparisons against zero
+/// are delegated to EmitTest.
+SDValue M68kTargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC,
+                                    const SDLoc &DL, SelectionDAG &DAG) const {
+  if (isNullConstant(Op1))
+    return EmitTest(Op0, M68kCC, DL, DAG);
+
+  assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
+         "Unexpected comparison operation for MVT::i1 operands");
+
+  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
+       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
+    // Only promote the compare up to I32 if it is a 16 bit operation
+    // with an immediate.  16 bit immediates are to be avoided.
+    if ((Op0.getValueType() == MVT::i16 &&
+         (isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
+        !DAG.getMachineFunction().getFunction().hasMinSize()) {
+      // Sign- or zero-extend depending on whether the condition is signed,
+      // so the widened compare gives the same answer.
+      unsigned ExtendOp =
+          isM68kCCUnsigned(M68kCC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+      Op0 = DAG.getNode(ExtendOp, DL, MVT::i32, Op0);
+      Op1 = DAG.getNode(ExtendOp, DL, MVT::i32, Op1);
+    }
+    // Use SUB instead of CMP to enable CSE between SUB and CMP.
+    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i8);
+    SDValue Sub = DAG.getNode(M68kISD::SUB, DL, VTs, Op0, Op1);
+    return SDValue(Sub.getNode(), 1);
+  }
+  return DAG.getNode(M68kISD::CMP, DL, MVT::i8, Op0, Op1);
+}
+
+/// The result of an 'and' (or of a 'trunc to i1') is compared against zero;
+/// try to rewrite the comparison as a BT (bit test) node.
+SDValue M68kTargetLowering::LowerToBT(SDValue Op, ISD::CondCode CC,
+                                      const SDLoc &DL,
+                                      SelectionDAG &DAG) const {
+  unsigned Opc = Op.getOpcode();
+  if (Opc == ISD::AND)
+    return LowerAndToBT(Op, CC, DL, DAG);
+  if (Opc == ISD::TRUNCATE && Op.getValueType() == MVT::i1)
+    return LowerTruncateToBT(Op, CC, DL, DAG);
+  return SDValue();
+}
+
+/// Lower ISD::SETCC into a M68kISD::SETCC node reading the CCR flags
+/// produced by an explicit compare/test, after trying cheaper forms (bit
+/// tests, reuse of an existing SETCC).
+SDValue M68kTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
+  MVT VT = Op.getSimpleValueType();
+  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
+
+  SDValue Op0 = Op.getOperand(0);
+  SDValue Op1 = Op.getOperand(1);
+  SDLoc DL(Op);
+  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+
+  // Optimize to BT if possible.
+  // Lower (X & (1 << N)) == 0 to BT(X, N).
+  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
+  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
+  // Lower (trunc (X >> N) to i1) to BT(X, N).
+  if (Op0.hasOneUse() && isNullConstant(Op1) &&
+      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+    if (SDValue NewSetCC = LowerToBT(Op0, CC, DL, DAG)) {
+      // NOTE(review): VT is asserted to be MVT::i8 above, so this i1 branch
+      // (and the one below) looks unreachable -- possibly kept from the port.
+      if (VT == MVT::i1)
+        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, NewSetCC);
+      return NewSetCC;
+    }
+  }
+
+  // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
+  // these.
+  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
+      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+
+    // If the input is a setcc, then reuse the input setcc or use a new one with
+    // the inverted condition.
+    if (Op0.getOpcode() == M68kISD::SETCC) {
+      M68k::CondCode CCode = (M68k::CondCode)Op0.getConstantOperandVal(0);
+      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
+      if (!Invert)
+        return Op0;
+
+      CCode = M68k::GetOppositeBranchCondition(CCode);
+      SDValue SetCC =
+          DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
+                      DAG.getConstant(CCode, DL, MVT::i8), Op0.getOperand(1));
+      if (VT == MVT::i1)
+        return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
+      return SetCC;
+    }
+  }
+  if (Op0.getValueType() == MVT::i1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
+    if (isOneConstant(Op1)) {
+      // X ==/!= 1 on i1 is the same as X !=/== 0; re-emit as the 0 form.
+      ISD::CondCode NewCC = ISD::GlobalISel::getSetCCInverse(CC, true);
+      return DAG.getSetCC(DL, VT, Op0, DAG.getConstant(0, DL, MVT::i1), NewCC);
+    }
+    if (!isNullConstant(Op1)) {
+      // Non-constant i1 RHS: compare via XOR against zero.
+      SDValue Xor = DAG.getNode(ISD::XOR, DL, MVT::i1, Op0, Op1);
+      return DAG.getSetCC(DL, VT, Xor, DAG.getConstant(0, DL, MVT::i1), CC);
+    }
+  }
+
+  // General case: translate the condition, emit the compare, and read the
+  // resulting CCR flags through a M68kISD::SETCC.
+  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
+  unsigned M68kCC = TranslateM68kCC(CC, DL, IsFP, Op0, Op1, DAG);
+  if (M68kCC == M68k::COND_INVALID)
+    return SDValue();
+
+  SDValue CCR = EmitCmp(Op0, Op1, M68kCC, DL, DAG);
+  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
+                     DAG.getConstant(M68kCC, DL, MVT::i8), CCR);
+}
+
+/// Lower ISD::SETCCCARRY (compare LHS and RHS together with an incoming
+/// borrow) into a SUBX whose CCR result feeds a M68kISD::SETCC.
+SDValue M68kTargetLowering::LowerSETCCCARRY(SDValue Op,
+                                            SelectionDAG &DAG) const {
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  SDValue Carry = Op.getOperand(2);
+  SDValue Cond = Op.getOperand(3);
+  SDLoc DL(Op);
+
+  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
+  M68k::CondCode CC = TranslateIntegerM68kCC(cast<CondCodeSDNode>(Cond)->get());
+
+  // Re-materialize the boolean carry into the CCR: adding all-ones produces
+  // a carry-out exactly when the incoming Carry value is non-zero.
+  // NOTE(review): the flag result type here is MVT::i32 while other lowerings
+  // in this file use MVT::i8 -- confirm this is intended.
+  EVT CarryVT = Carry.getValueType();
+  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
+  Carry = DAG.getNode(M68kISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), Carry,
+                      DAG.getConstant(NegOne, DL, CarryVT));
+
+  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
+  SDValue Cmp =
+      DAG.getNode(M68kISD::SUBX, DL, VTs, LHS, RHS, Carry.getValue(1));
+
+  return DAG.getNode(M68kISD::SETCC, DL, MVT::i8,
+                     DAG.getConstant(CC, DL, MVT::i8), Cmp.getValue(1));
+}
+
+/// Return true if opcode is a M68k logical comparison.
+static bool isM68kLogicalCmp(SDValue Op) {
+  unsigned Opc = Op.getNode()->getOpcode();
+
+  // An explicit compare always yields usable condition flags.
+  if (Opc == M68kISD::CMP)
+    return true;
+
+  // Flag-producing arithmetic/logic nodes expose their CCR result as
+  // value #1.
+  switch (Opc) {
+  case M68kISD::ADD:
+  case M68kISD::SUB:
+  case M68kISD::ADDX:
+  case M68kISD::SUBX:
+  case M68kISD::SMUL:
+  case M68kISD::UMUL:
+  case M68kISD::OR:
+  case M68kISD::XOR:
+  case M68kISD::AND:
+    if (Op.getResNo() == 1)
+      return true;
+    break;
+  default:
+    break;
+  }
+
+  // UMUL additionally carries a flag result as value #2.
+  return Op.getResNo() == 2 && Opc == M68kISD::UMUL;
+}
+
+/// Return true if V is a truncate whose discarded high bits are known to be
+/// zero, i.e. the truncation cannot change the value.
+static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
+  if (V.getOpcode() != ISD::TRUNCATE)
+    return false;
+
+  SDValue Src = V.getOperand(0);
+  unsigned SrcBits = Src.getValueSizeInBits();
+  unsigned DstBits = V.getValueSizeInBits();
+  // Every bit above the truncated width must be proven zero.
+  return DAG.MaskedValueIsZero(
+      Src, APInt::getHighBitsSet(SrcBits, SrcBits - DstBits));
+}
+
+/// Lower ISD::SELECT into M68kISD::CMOV, first arranging for the condition
+/// to be available as a M68k condition code plus a CCR-producing node, and
+/// applying several special-case simplifications along the way.
+SDValue M68kTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
+  bool addTest = true;
+  SDValue Cond = Op.getOperand(0);
+  SDValue Op1 = Op.getOperand(1);
+  SDValue Op2 = Op.getOperand(2);
+  SDLoc DL(Op);
+  SDValue CC;
+
+  // Lower a SETCC condition first so its M68k condition code can be reused.
+  if (Cond.getOpcode() == ISD::SETCC) {
+    if (SDValue NewCond = LowerSETCC(Cond, DAG))
+      Cond = NewCond;
+  }
+
+  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
+  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
+  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
+  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
+  if (Cond.getOpcode() == M68kISD::SETCC &&
+      Cond.getOperand(1).getOpcode() == M68kISD::CMP &&
+      isNullConstant(Cond.getOperand(1).getOperand(0))) {
+    SDValue Cmp = Cond.getOperand(1);
+
+    unsigned CondCode =
+        cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
+
+    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
+        (CondCode == M68k::COND_EQ || CondCode == M68k::COND_NE)) {
+      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
+
+      SDValue CmpOp0 = Cmp.getOperand(1);
+      // Apply further optimizations for special cases
+      // (select (x != 0), -1, 0) -> neg & sbb
+      // (select (x == 0), 0, -1) -> neg & sbb
+      if (isNullConstant(Y) &&
+          (isAllOnesConstant(Op1) == (CondCode == M68k::COND_NE))) {
+
+        SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
+
+        // 0 - x sets the carry exactly when x != 0; SETCC_CARRY then
+        // broadcasts that carry into every bit of the result.
+        SDValue Neg =
+            DAG.getNode(M68kISD::SUB, DL, VTs,
+                        DAG.getConstant(0, DL, CmpOp0.getValueType()), CmpOp0);
+
+        SDValue Res = DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
+                                  DAG.getConstant(M68k::COND_CS, DL, MVT::i8),
+                                  SDValue(Neg.getNode(), 1));
+        return Res;
+      }
+
+      Cmp = DAG.getNode(M68kISD::CMP, DL, MVT::i8,
+                        DAG.getConstant(1, DL, CmpOp0.getValueType()), CmpOp0);
+
+      SDValue Res = // Res = 0 or -1.
+          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
+                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cmp);
+
+      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_EQ))
+        Res = DAG.getNOT(DL, Res, Res.getValueType());
+
+      if (!isNullConstant(Op2))
+        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
+      return Res;
+    }
+  }
+
+  // Look past (and (setcc_carry (cmp ...)), 1).
+  if (Cond.getOpcode() == ISD::AND &&
+      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
+      isOneConstant(Cond.getOperand(1)))
+    Cond = Cond.getOperand(0);
+
+  // If condition flag is set by a M68kISD::CMP, then use it as the condition
+  // setting operand in place of the M68kISD::SETCC.
+  unsigned CondOpcode = Cond.getOpcode();
+  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
+    CC = Cond.getOperand(0);
+
+    SDValue Cmp = Cond.getOperand(1);
+    unsigned Opc = Cmp.getOpcode();
+
+    bool IllegalFPCMov = false;
+
+    if ((isM68kLogicalCmp(Cmp) && !IllegalFPCMov) || Opc == M68kISD::BT) {
+      Cond = Cmp;
+      addTest = false;
+    }
+  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
+             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
+             CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
+    // Overflow-producing nodes: lower them directly to the flag-setting M68k
+    // form so the overflow/carry flag can drive the CMOV.
+    SDValue LHS = Cond.getOperand(0);
+    SDValue RHS = Cond.getOperand(1);
+    unsigned MxOpcode;
+    unsigned MxCond;
+    SDVTList VTs;
+    switch (CondOpcode) {
+    case ISD::UADDO:
+      MxOpcode = M68kISD::ADD;
+      MxCond = M68k::COND_CS;
+      break;
+    case ISD::SADDO:
+      MxOpcode = M68kISD::ADD;
+      MxCond = M68k::COND_VS;
+      break;
+    case ISD::USUBO:
+      MxOpcode = M68kISD::SUB;
+      MxCond = M68k::COND_CS;
+      break;
+    case ISD::SSUBO:
+      MxOpcode = M68kISD::SUB;
+      MxCond = M68k::COND_VS;
+      break;
+    case ISD::UMULO:
+      MxOpcode = M68kISD::UMUL;
+      MxCond = M68k::COND_VS;
+      break;
+    case ISD::SMULO:
+      MxOpcode = M68kISD::SMUL;
+      MxCond = M68k::COND_VS;
+      break;
+    default:
+      llvm_unreachable("unexpected overflowing operator");
+    }
+    // UMUL exposes its flag as an extra (third) result.
+    if (CondOpcode == ISD::UMULO)
+      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i32);
+    else
+      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
+
+    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);
+
+    if (CondOpcode == ISD::UMULO)
+      Cond = MxOp.getValue(2);
+    else
+      Cond = MxOp.getValue(1);
+
+    CC = DAG.getConstant(MxCond, DL, MVT::i8);
+    addTest = false;
+  }
+
+  if (addTest) {
+    // Look past the truncate if the high bits are known zero.
+    if (isTruncWithZeroHighBitsInput(Cond, DAG))
+      Cond = Cond.getOperand(0);
+
+    // We know the result of AND is compared against zero. Try to match
+    // it to BT.
+    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
+      if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
+        CC = NewSetCC.getOperand(0);
+        Cond = NewSetCC.getOperand(1);
+        addTest = false;
+      }
+    }
+  }
+
+  if (addTest) {
+    // No flags available yet: compare the condition against zero.
+    CC = DAG.getConstant(M68k::COND_NE, DL, MVT::i8);
+    Cond = EmitTest(Cond, M68k::COND_NE, DL, DAG);
+  }
+
+  // a <  b ? -1 :  0 -> RES = ~setcc_carry
+  // a <  b ?  0 : -1 -> RES = setcc_carry
+  // a >= b ? -1 :  0 -> RES = setcc_carry
+  // a >= b ?  0 : -1 -> RES = ~setcc_carry
+  if (Cond.getOpcode() == M68kISD::SUB) {
+    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
+
+    if ((CondCode == M68k::COND_CC || CondCode == M68k::COND_CS) &&
+        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
+        (isNullConstant(Op1) || isNullConstant(Op2))) {
+      SDValue Res =
+          DAG.getNode(M68kISD::SETCC_CARRY, DL, Op.getValueType(),
+                      DAG.getConstant(M68k::COND_CS, DL, MVT::i8), Cond);
+      if (isAllOnesConstant(Op1) != (CondCode == M68k::COND_CS))
+        return DAG.getNOT(DL, Res, Res.getValueType());
+      return Res;
+    }
+  }
+
+  // M68k doesn't have an i8 cmov. If both operands are the result of a
+  // truncate widen the cmov and push the truncate through. This avoids
+  // introducing a new branch during isel and doesn't add any extensions.
+  if (Op.getValueType() == MVT::i8 && Op1.getOpcode() == ISD::TRUNCATE &&
+      Op2.getOpcode() == ISD::TRUNCATE) {
+    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
+    if (T1.getValueType() == T2.getValueType() &&
+        // Blacklist CopyFromReg to avoid partial register stalls.
+        T1.getOpcode() != ISD::CopyFromReg &&
+        T2.getOpcode() != ISD::CopyFromReg) {
+      SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
+      SDValue Cmov = DAG.getNode(M68kISD::CMOV, DL, VTs, T2, T1, CC, Cond);
+      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
+    }
+  }
+
+  // M68kISD::CMOV means set the result (which is operand 1) to the RHS if
+  // condition is true.
+  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
+  SDValue Ops[] = {Op2, Op1, CC, Cond};
+  return DAG.getNode(M68kISD::CMOV, DL, VTs, Ops);
+}
+
+/// Return true if node is an ISD::AND or ISD::OR of two M68k::SETcc nodes
+/// each of which has no other use apart from the AND / OR.
+static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
+  Opc = Op.getOpcode();
+  if (Opc != ISD::OR && Opc != ISD::AND)
+    return false;
+  SDValue LHS = Op.getOperand(0);
+  SDValue RHS = Op.getOperand(1);
+  return M68k::IsSETCC(LHS.getOpcode()) && LHS.hasOneUse() &&
+         M68k::IsSETCC(RHS.getOpcode()) && RHS.hasOneUse();
+}
+
+/// Return true if node is an ISD::XOR of a M68kISD::SETCC and 1 and that the
+/// SETCC node has a single use.
+static bool isXor1OfSetCC(SDValue Op) {
+  if (Op.getOpcode() != ISD::XOR)
+    return false;
+  if (!isOneConstant(Op.getOperand(1)))
+    return false;
+  SDValue SetCC = Op.getOperand(0);
+  return SetCC.getOpcode() == M68kISD::SETCC && SetCC.hasOneUse();
+}
+
+/// Lower ISD::BRCOND into M68kISD::BRCOND, materializing the condition into
+/// CCR flags plus a M68k condition code, with special handling for overflow
+/// nodes and for AND/OR/XOR-of-setcc patterns.
+SDValue M68kTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
+  bool AddTest = true;
+  SDValue Chain = Op.getOperand(0);
+  SDValue Cond = Op.getOperand(1);
+  SDValue Dest = Op.getOperand(2);
+  SDLoc DL(Op);
+  SDValue CC;
+  bool Inverted = false;
+
+  if (Cond.getOpcode() == ISD::SETCC) {
+    // Check for setcc([su]{add,sub}o == 0).
+    if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
+        isNullConstant(Cond.getOperand(1)) &&
+        Cond.getOperand(0).getResNo() == 1 &&
+        (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
+         Cond.getOperand(0).getOpcode() == ISD::UADDO ||
+         Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
+         Cond.getOperand(0).getOpcode() == ISD::USUBO)) {
+      // The branch tests "no overflow"; remember to invert the flag below.
+      Inverted = true;
+      Cond = Cond.getOperand(0);
+    } else {
+      if (SDValue NewCond = LowerSETCC(Cond, DAG))
+        Cond = NewCond;
+    }
+  }
+
+  // Look pass (and (setcc_carry (cmp ...)), 1).
+  if (Cond.getOpcode() == ISD::AND &&
+      Cond.getOperand(0).getOpcode() == M68kISD::SETCC_CARRY &&
+      isOneConstant(Cond.getOperand(1)))
+    Cond = Cond.getOperand(0);
+
+  // If condition flag is set by a M68kISD::CMP, then use it as the condition
+  // setting operand in place of the M68kISD::SETCC.
+  unsigned CondOpcode = Cond.getOpcode();
+  if (CondOpcode == M68kISD::SETCC || CondOpcode == M68kISD::SETCC_CARRY) {
+    CC = Cond.getOperand(0);
+
+    SDValue Cmp = Cond.getOperand(1);
+    unsigned Opc = Cmp.getOpcode();
+
+    if (isM68kLogicalCmp(Cmp) || Opc == M68kISD::BT) {
+      Cond = Cmp;
+      AddTest = false;
+    } else {
+      switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
+      default:
+        break;
+      case M68k::COND_VS:
+      case M68k::COND_CS:
+        // These can only come from an arithmetic instruction with overflow,
+        // e.g. SADDO, UADDO.
+        Cond = Cond.getNode()->getOperand(1);
+        AddTest = false;
+        break;
+      }
+    }
+  }
+  CondOpcode = Cond.getOpcode();
+  if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
+      CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO) {
+    SDValue LHS = Cond.getOperand(0);
+    SDValue RHS = Cond.getOperand(1);
+    unsigned MxOpcode;
+    unsigned MxCond;
+    SDVTList VTs;
+    // Keep this in sync with LowerXALUO, otherwise we might create redundant
+    // instructions that can't be removed afterwards (i.e. M68kISD::ADD and
+    // M68kISD::INC).
+    switch (CondOpcode) {
+    case ISD::UADDO:
+      MxOpcode = M68kISD::ADD;
+      MxCond = M68k::COND_CS;
+      break;
+    case ISD::SADDO:
+      MxOpcode = M68kISD::ADD;
+      MxCond = M68k::COND_VS;
+      break;
+    case ISD::USUBO:
+      MxOpcode = M68kISD::SUB;
+      MxCond = M68k::COND_CS;
+      break;
+    case ISD::SSUBO:
+      MxOpcode = M68kISD::SUB;
+      MxCond = M68k::COND_VS;
+      break;
+    case ISD::UMULO:
+      MxOpcode = M68kISD::UMUL;
+      MxCond = M68k::COND_VS;
+      break;
+    case ISD::SMULO:
+      MxOpcode = M68kISD::SMUL;
+      MxCond = M68k::COND_VS;
+      break;
+    default:
+      llvm_unreachable("unexpected overflowing operator");
+    }
+
+    if (Inverted)
+      MxCond = M68k::GetOppositeBranchCondition((M68k::CondCode)MxCond);
+
+    // UMUL exposes its flag as an extra (third) result.
+    if (CondOpcode == ISD::UMULO)
+      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), MVT::i8);
+    else
+      VTs = DAG.getVTList(LHS.getValueType(), MVT::i8);
+
+    SDValue MxOp = DAG.getNode(MxOpcode, DL, VTs, LHS, RHS);
+
+    if (CondOpcode == ISD::UMULO)
+      Cond = MxOp.getValue(2);
+    else
+      Cond = MxOp.getValue(1);
+
+    CC = DAG.getConstant(MxCond, DL, MVT::i8);
+    AddTest = false;
+  } else {
+    unsigned CondOpc;
+    if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
+      SDValue Cmp = Cond.getOperand(0).getOperand(1);
+      if (CondOpc == ISD::OR) {
+        // Also, recognize the pattern generated by an FCMP_UNE. We can emit
+        // two branches instead of an explicit OR instruction with a
+        // separate test.
+        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp)) {
+          CC = Cond.getOperand(0).getOperand(0);
+          Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
+                              Dest, CC, Cmp);
+          CC = Cond.getOperand(1).getOperand(0);
+          Cond = Cmp;
+          AddTest = false;
+        }
+      } else { // ISD::AND
+        // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
+        // two branches instead of an explicit AND instruction with a
+        // separate test. However, we only do this if this block doesn't
+        // have a fall-through edge, because this requires an explicit
+        // jmp when the condition is false.
+        if (Cmp == Cond.getOperand(1).getOperand(1) && isM68kLogicalCmp(Cmp) &&
+            Op.getNode()->hasOneUse()) {
+          M68k::CondCode CCode =
+              (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
+          CCode = M68k::GetOppositeBranchCondition(CCode);
+          CC = DAG.getConstant(CCode, DL, MVT::i8);
+          SDNode *User = *Op.getNode()->use_begin();
+          // Look for an unconditional branch following this conditional branch.
+          // We need this because we need to reverse the successors in order
+          // to implement FCMP_OEQ.
+          if (User->getOpcode() == ISD::BR) {
+            SDValue FalseBB = User->getOperand(1);
+            SDNode *NewBR =
+                DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
+            assert(NewBR == User);
+            (void)NewBR;
+            Dest = FalseBB;
+
+            Chain = DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain,
+                                Dest, CC, Cmp);
+            M68k::CondCode CCode =
+                (M68k::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
+            CCode = M68k::GetOppositeBranchCondition(CCode);
+            CC = DAG.getConstant(CCode, DL, MVT::i8);
+            Cond = Cmp;
+            AddTest = false;
+          }
+        }
+      }
+    } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
+      // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
+      // It should be transformed during dag combiner except when the condition
+      // is set by a arithmetics with overflow node.
+      M68k::CondCode CCode =
+          (M68k::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
+      CCode = M68k::GetOppositeBranchCondition(CCode);
+      CC = DAG.getConstant(CCode, DL, MVT::i8);
+      Cond = Cond.getOperand(0).getOperand(1);
+      AddTest = false;
+    }
+  }
+
+  if (AddTest) {
+    // Look pass the truncate if the high bits are known zero.
+    if (isTruncWithZeroHighBitsInput(Cond, DAG))
+      Cond = Cond.getOperand(0);
+
+    // We know the result is compared against zero. Try to match it to BT.
+    if (Cond.hasOneUse()) {
+      if (SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG)) {
+        CC = NewSetCC.getOperand(0);
+        Cond = NewSetCC.getOperand(1);
+        AddTest = false;
+      }
+    }
+  }
+
+  if (AddTest) {
+    // No flags available yet: compare the condition against zero, honoring
+    // any requested inversion.
+    M68k::CondCode MxCond = Inverted ? M68k::COND_EQ : M68k::COND_NE;
+    CC = DAG.getConstant(MxCond, DL, MVT::i8);
+    Cond = EmitTest(Cond, MxCond, DL, DAG);
+  }
+  return DAG.getNode(M68kISD::BRCOND, DL, Op.getValueType(), Chain, Dest, CC,
+                     Cond);
+}
+
+/// Lower ISD::{ADDC,ADDE,SUBC,SUBE} to the corresponding M68k carry-aware
+/// nodes (ADD/ADDX, SUB/SUBX). Each lowered node gets a second MVT::i8
+/// result that models the CCR flags produced by the operation.
+SDValue M68kTargetLowering::LowerADDC_ADDE_SUBC_SUBE(SDValue Op,
+                                                     SelectionDAG &DAG) const {
+  MVT VT = Op.getNode()->getSimpleValueType(0);
+
+  // Let legalize expand this if it isn't a legal type yet.
+  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
+    return SDValue();
+
+  SDVTList VTs = DAG.getVTList(VT, MVT::i8);
+
+  unsigned Opc;
+  // ADDE/SUBE consume an incoming carry (operand 2); ADDC/SUBC do not.
+  bool ExtraOp = false;
+  switch (Op.getOpcode()) {
+  default:
+    llvm_unreachable("Invalid code");
+  case ISD::ADDC:
+    Opc = M68kISD::ADD;
+    break;
+  case ISD::ADDE:
+    Opc = M68kISD::ADDX;
+    ExtraOp = true;
+    break;
+  case ISD::SUBC:
+    Opc = M68kISD::SUB;
+    break;
+  case ISD::SUBE:
+    Opc = M68kISD::SUBX;
+    ExtraOp = true;
+    break;
+  }
+
+  if (!ExtraOp)
+    return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
+  // Forward the incoming carry as the third operand for ADDX/SUBX.
+  return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
+                     Op.getOperand(2));
+}
+
+// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
+// their target counterpart wrapped in the M68kISD::Wrapper node. Suppose N is
+// one of the above mentioned nodes. It has to be wrapped because otherwise
+// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
+// be used to form addressing mode. These wrapped nodes will be selected
+// into MOV32ri.
+SDValue M68kTargetLowering::LowerConstantPool(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
+
+  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
+  // global base reg.
+  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
+
+  // PC-relative references use the PC-based wrapper instead.
+  unsigned WrapperKind = M68kISD::Wrapper;
+  if (M68kII::isPCRelGlobalReference(OpFlag)) {
+    WrapperKind = M68kISD::WrapperPC;
+  }
+
+  MVT PtrVT = getPointerTy(DAG.getDataLayout());
+  SDValue Result = DAG.getTargetConstantPool(
+      CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
+
+  SDLoc DL(CP);
+  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
+
+  // With PIC, the address is actually $g + Offset.
+  if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
+    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
+                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
+                         Result);
+  }
+
+  return Result;
+}
+
+/// Lower an ExternalSymbolSDNode to its target form, wrapping it in
+/// Wrapper/WrapperPC, adding the PIC base when required, and loading
+/// through the GOT for stub references.
+SDValue M68kTargetLowering::LowerExternalSymbol(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
+
+  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
+  // global base reg.
+  const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
+  unsigned char OpFlag = Subtarget.classifyExternalReference(*Mod);
+
+  unsigned WrapperKind = M68kISD::Wrapper;
+  if (M68kII::isPCRelGlobalReference(OpFlag)) {
+    WrapperKind = M68kISD::WrapperPC;
+  }
+
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+  SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
+
+  SDLoc DL(Op);
+  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
+
+  // With PIC, the address is actually $g + Offset.
+  if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
+    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
+                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
+                         Result);
+  }
+
+  // For symbols that require a load from a stub to get the address, emit the
+  // load.
+  if (M68kII::isGlobalStubReference(OpFlag)) {
+    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
+                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+  }
+
+  return Result;
+}
+
+/// Lower a BlockAddressSDNode to its target form, choosing the PC-relative
+/// wrapper when the subtarget classifies the reference that way and adding
+/// the PIC base register when required.
+SDValue M68kTargetLowering::LowerBlockAddress(SDValue Op,
+                                              SelectionDAG &DAG) const {
+  unsigned char OpFlags = Subtarget.classifyBlockAddressReference();
+  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
+  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
+  SDLoc DL(Op);
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+  // Create the TargetBlockAddressAddress node.
+  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
+
+  if (M68kII::isPCRelBlockReference(OpFlags)) {
+    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
+  } else {
+    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
+  }
+
+  // With PIC, the address is actually $g + Offset.
+  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
+    Result =
+        DAG.getNode(ISD::ADD, DL, PtrVT,
+                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
+  }
+
+  return Result;
+}
+
+/// Lower a global address to its target form: wrap it, add the PIC base
+/// and/or a GOT load when the reference classification requires it, and
+/// re-apply any constant offset that could not be folded into the node.
+SDValue M68kTargetLowering::LowerGlobalAddress(const GlobalValue *GV,
+                                               const SDLoc &DL, int64_t Offset,
+                                               SelectionDAG &DAG) const {
+  unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+  // Create the TargetGlobalAddress node, folding in the constant
+  // offset if it is legal.
+  SDValue Result;
+  if (M68kII::isDirectGlobalReference(OpFlags)) {
+    // NOTE(review): OpFlags is not passed in the direct case — presumably a
+    // direct reference implies no modifier flags are needed; confirm against
+    // M68kII flag definitions.
+    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Offset);
+    Offset = 0;
+  } else {
+    // Indirect reference: the offset cannot be folded and is re-added below.
+    Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
+  }
+
+  if (M68kII::isPCRelGlobalReference(OpFlags))
+    Result = DAG.getNode(M68kISD::WrapperPC, DL, PtrVT, Result);
+  else
+    Result = DAG.getNode(M68kISD::Wrapper, DL, PtrVT, Result);
+
+  // With PIC, the address is actually $g + Offset.
+  if (M68kII::isGlobalRelativeToPICBase(OpFlags)) {
+    Result =
+        DAG.getNode(ISD::ADD, DL, PtrVT,
+                    DAG.getNode(M68kISD::GLOBAL_BASE_REG, DL, PtrVT), Result);
+  }
+
+  // For globals that require a load from a stub to get the address, emit the
+  // load.
+  if (M68kII::isGlobalStubReference(OpFlags)) {
+    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
+                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+  }
+
+  // If there was a non-zero offset that we didn't fold, create an explicit
+  // addition for it.
+  if (Offset != 0) {
+    Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
+                         DAG.getConstant(Offset, DL, PtrVT));
+  }
+
+  return Result;
+}
+
+/// SDValue entry point: unwrap the GlobalAddressSDNode and delegate to the
+/// offset-aware overload above.
+SDValue M68kTargetLowering::LowerGlobalAddress(SDValue Op,
+                                               SelectionDAG &DAG) const {
+  const auto *GA = cast<GlobalAddressSDNode>(Op);
+  return LowerGlobalAddress(GA->getGlobal(), SDLoc(Op), GA->getOffset(), DAG);
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Lower Jump Table
+//===----------------------------------------------------------------------===//
+
+/// Lower a JumpTableSDNode to its target form, wrapping it and adding the
+/// PIC base register when the reference classification requires it.
+SDValue M68kTargetLowering::LowerJumpTable(SDValue Op,
+                                           SelectionDAG &DAG) const {
+  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
+
+  // In PIC mode (unless we're in PCRel PIC mode) we add an offset to the
+  // global base reg.
+  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
+
+  // PC-relative references use the PC-based wrapper instead.
+  unsigned WrapperKind = M68kISD::Wrapper;
+  if (M68kII::isPCRelGlobalReference(OpFlag)) {
+    WrapperKind = M68kISD::WrapperPC;
+  }
+
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
+  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
+  SDLoc DL(JT);
+  Result = DAG.getNode(WrapperKind, DL, PtrVT, Result);
+
+  // With PIC, the address is actually $g + Offset.
+  if (M68kII::isGlobalRelativeToPICBase(OpFlag)) {
+    Result = DAG.getNode(ISD::ADD, DL, PtrVT,
+                         DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(), PtrVT),
+                         Result);
+  }
+
+  return Result;
+}
+
+/// Delegate the jump table encoding choice to the subtarget.
+unsigned M68kTargetLowering::getJumpTableEncoding() const {
+  return Subtarget.getJumpTableEncoding();
+}
+
+/// Emit a custom (EK_Custom32) jump table entry: the target block's symbol
+/// with a GOTOFF modifier, i.e. relative to the global base.
+const MCExpr *M68kTargetLowering::LowerCustomJumpTableEntry(
+    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
+    unsigned uid, MCContext &Ctx) const {
+  return MCSymbolRefExpr::create(MBB->getSymbol(), MCSymbolRefExpr::VK_GOTOFF,
+                                 Ctx);
+}
+
+/// Return the base SDValue that PIC jump table entries are relative to:
+/// the global base register for EK_Custom32 entries, otherwise the jump
+/// table address itself.
+SDValue M68kTargetLowering::getPICJumpTableRelocBase(SDValue Table,
+                                                     SelectionDAG &DAG) const {
+  if (getJumpTableEncoding() == MachineJumpTableInfo::EK_Custom32)
+    return DAG.getNode(M68kISD::GLOBAL_BASE_REG, SDLoc(),
+                       getPointerTy(DAG.getDataLayout()));
+
+  // MachineJumpTableInfo::EK_LabelDifference32 entry
+  return Table;
+}
+
+// NOTE: This is only used for MachineJumpTableInfo::EK_LabelDifference32
+// entries; the relocation base is the jump table's own symbol.
+const MCExpr *M68kTargetLowering::getPICJumpTableRelocBaseExpr(
+    const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
+  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
+}
+
+/// Determines whether the callee is required to pop its own arguments.
+/// Callee pop is necessary to support tail calls.
+/// M68k never uses callee-pop conventions, so all parameters are
+/// intentionally unused and this always returns false.
+bool M68k::isCalleePop(CallingConv::ID CallingConv, bool IsVarArg,
+                       bool GuaranteeTCO) {
+  return false;
+}
+
+// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
+// together with other CMOV pseudo-opcodes into a single basic-block with
+// conditional jump around it.
+static bool isCMOVPseudo(MachineInstr &MI) {
+  const unsigned Opcode = MI.getOpcode();
+  return Opcode == M68k::CMOV8d || Opcode == M68k::CMOV16d ||
+         Opcode == M68k::CMOV32r;
+}
+
+// The CCR operand of SelectItr might be missing a kill marker
+// because there were multiple uses of CCR, and ISel didn't know
+// which to mark. Figure out whether SelectItr should have had a
+// kill marker, and set it if it should. Returns the correct kill
+// marker value.
+static bool checkAndUpdateCCRKill(MachineBasicBlock::iterator SelectItr,
+                                  MachineBasicBlock *BB,
+                                  const TargetRegisterInfo *TRI) {
+  // Scan forward through BB for a use/def of CCR.
+  MachineBasicBlock::iterator miI(std::next(SelectItr));
+  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
+    const MachineInstr &mi = *miI;
+    // A later read means CCR is live past SelectItr: no kill flag.
+    if (mi.readsRegister(M68k::CCR))
+      return false;
+    // A redefinition without an intervening read ends the live range.
+    if (mi.definesRegister(M68k::CCR))
+      break; // Should have kill-flag - update below.
+  }
+
+  // If we hit the end of the block, check whether CCR is live into a
+  // successor.
+  if (miI == BB->end())
+    for (const auto *SBB : BB->successors())
+      if (SBB->isLiveIn(M68k::CCR))
+        return false;
+
+  // We found a def, or hit the end of the basic block and CCR wasn't live
+  // out. SelectMI should have a kill flag on CCR.
+  SelectItr->addRegisterKilled(M68k::CCR, TRI);
+  return true;
+}
+
+/// Expand a CMOV pseudo-instruction (selected from M68kISD::CMOV) into an
+/// explicit diamond of basic blocks with a conditional branch, replacing the
+/// CMOV(s) with PHI nodes at the join point. Returns the sink block that
+/// control flow continues in.
+MachineBasicBlock *
+M68kTargetLowering::EmitLoweredSelect(MachineInstr &MI,
+                                      MachineBasicBlock *MBB) const {
+  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
+  DebugLoc DL = MI.getDebugLoc();
+
+  // To "insert" a SELECT_CC instruction, we actually have to insert the
+  // diamond control-flow pattern.  The incoming instruction knows the
+  // destination vreg to set, the condition code register to branch on, the
+  // true/false values to select between, and a branch opcode to use.
+  const BasicBlock *BB = MBB->getBasicBlock();
+  MachineFunction::iterator It = ++MBB->getIterator();
+
+  //  ThisMBB:
+  //  ...
+  //   TrueVal = ...
+  //   cmp ccX, r1, r2
+  //   bcc Copy1MBB
+  //   fallthrough --> Copy0MBB
+  MachineBasicBlock *ThisMBB = MBB;
+  MachineFunction *F = MBB->getParent();
+
+  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
+  // as described above, by inserting a MBB, and then making a PHI at the join
+  // point to select the true and false operands of the CMOV in the PHI.
+  //
+  // The code also handles two different cases of multiple CMOV opcodes
+  // in a row.
+  //
+  // Case 1:
+  // In this case, there are multiple CMOVs in a row, all which are based on
+  // the same condition setting (or the exact opposite condition setting).
+  // In this case we can lower all the CMOVs using a single inserted MBB, and
+  // then make a number of PHIs at the join point to model the CMOVs. The only
+  // trickiness here, is that in a case like:
+  //
+  // t2 = CMOV cond1 t1, f1
+  // t3 = CMOV cond1 t2, f2
+  //
+  // when rewriting this into PHIs, we have to perform some renaming on the
+  // temps since you cannot have a PHI operand refer to a PHI result earlier
+  // in the same block.  The "simple" but wrong lowering would be:
+  //
+  // t2 = PHI t1(BB1), f1(BB2)
+  // t3 = PHI t2(BB1), f2(BB2)
+  //
+  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
+  // renaming is to note that on the path through BB1, t2 is really just a
+  // copy of t1, and do that renaming, properly generating:
+  //
+  // t2 = PHI t1(BB1), f1(BB2)
+  // t3 = PHI t1(BB1), f2(BB2)
+  //
+  // Case 2, we lower cascaded CMOVs such as
+  //
+  //   (CMOV (CMOV F, T, cc1), T, cc2)
+  //
+  // to two successive branches.
+  MachineInstr *CascadedCMOV = nullptr;
+  MachineInstr *LastCMOV = &MI;
+  M68k::CondCode CC = M68k::CondCode(MI.getOperand(3).getImm());
+  M68k::CondCode OppCC = M68k::GetOppositeBranchCondition(CC);
+  MachineBasicBlock::iterator NextMIIt =
+      std::next(MachineBasicBlock::iterator(MI));
+
+  // Check for case 1, where there are multiple CMOVs with the same condition
+  // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
+  // number of jumps the most.
+
+  if (isCMOVPseudo(MI)) {
+    // See if we have a string of CMOVS with the same condition.
+    while (NextMIIt != MBB->end() && isCMOVPseudo(*NextMIIt) &&
+           (NextMIIt->getOperand(3).getImm() == CC ||
+            NextMIIt->getOperand(3).getImm() == OppCC)) {
+      LastCMOV = &*NextMIIt;
+      ++NextMIIt;
+    }
+  }
+
+  // This checks for case 2, but only do this if we didn't already find
+  // case 1, as indicated by LastCMOV == MI.
+  if (LastCMOV == &MI && NextMIIt != MBB->end() &&
+      NextMIIt->getOpcode() == MI.getOpcode() &&
+      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
+      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
+      NextMIIt->getOperand(1).isKill()) {
+    CascadedCMOV = &*NextMIIt;
+  }
+
+  MachineBasicBlock *Jcc1MBB = nullptr;
+
+  // If we have a cascaded CMOV, we lower it to two successive branches to
+  // the same block.  CCR is used by both, so mark it as live in the second.
+  if (CascadedCMOV) {
+    Jcc1MBB = F->CreateMachineBasicBlock(BB);
+    F->insert(It, Jcc1MBB);
+    Jcc1MBB->addLiveIn(M68k::CCR);
+  }
+
+  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(BB);
+  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(BB);
+  F->insert(It, Copy0MBB);
+  F->insert(It, SinkMBB);
+
+  // If the CCR register isn't dead in the terminator, then claim that it's
+  // live into the sink and copy blocks.
+  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
+
+  MachineInstr *LastCCRSUser = CascadedCMOV ? CascadedCMOV : LastCMOV;
+  if (!LastCCRSUser->killsRegister(M68k::CCR) &&
+      !checkAndUpdateCCRKill(LastCCRSUser, MBB, TRI)) {
+    Copy0MBB->addLiveIn(M68k::CCR);
+    SinkMBB->addLiveIn(M68k::CCR);
+  }
+
+  // Transfer the remainder of MBB and its successor edges to SinkMBB.
+  SinkMBB->splice(SinkMBB->begin(), MBB,
+                  std::next(MachineBasicBlock::iterator(LastCMOV)), MBB->end());
+  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+  // Add the true and fallthrough blocks as its successors.
+  if (CascadedCMOV) {
+    // The fallthrough block may be Jcc1MBB, if we have a cascaded CMOV.
+    MBB->addSuccessor(Jcc1MBB);
+
+    // In that case, Jcc1MBB will itself fallthrough the Copy0MBB, and
+    // jump to the SinkMBB.
+    Jcc1MBB->addSuccessor(Copy0MBB);
+    Jcc1MBB->addSuccessor(SinkMBB);
+  } else {
+    MBB->addSuccessor(Copy0MBB);
+  }
+
+  // The true block target of the first (or only) branch is always SinkMBB.
+  MBB->addSuccessor(SinkMBB);
+
+  // Create the conditional branch instruction.
+  unsigned Opc = M68k::GetCondBranchFromCond(CC);
+  BuildMI(MBB, DL, TII->get(Opc)).addMBB(SinkMBB);
+
+  if (CascadedCMOV) {
+    unsigned Opc2 = M68k::GetCondBranchFromCond(
+        (M68k::CondCode)CascadedCMOV->getOperand(3).getImm());
+    BuildMI(Jcc1MBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
+  }
+
+  //  Copy0MBB:
+  //   %FalseValue = ...
+  //   # fallthrough to SinkMBB
+  Copy0MBB->addSuccessor(SinkMBB);
+
+  //  SinkMBB:
+  //   %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
+  //  ...
+  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
+  MachineBasicBlock::iterator MIItEnd =
+      std::next(MachineBasicBlock::iterator(LastCMOV));
+  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
+  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
+  MachineInstrBuilder MIB;
+
+  // As we are creating the PHIs, we have to be careful if there is more than
+  // one.  Later CMOVs may reference the results of earlier CMOVs, but later
+  // PHIs have to reference the individual true/false inputs from earlier PHIs.
+  // That also means that PHI construction must work forward from earlier to
+  // later, and that the code must maintain a mapping from earlier PHI's
+  // destination registers, and the registers that went into the PHI.
+
+  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
+    unsigned DestReg = MIIt->getOperand(0).getReg();
+    unsigned Op1Reg = MIIt->getOperand(1).getReg();
+    unsigned Op2Reg = MIIt->getOperand(2).getReg();
+
+    // If this CMOV we are generating is the opposite condition from
+    // the jump we generated, then we have to swap the operands for the
+    // PHI that is going to be generated.
+    if (MIIt->getOperand(3).getImm() == OppCC)
+      std::swap(Op1Reg, Op2Reg);
+
+    // Rewrite operands that were defined by an earlier PHI to the value that
+    // flowed in along the corresponding edge (see renaming discussion above).
+    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
+      Op1Reg = RegRewriteTable[Op1Reg].first;
+
+    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
+      Op2Reg = RegRewriteTable[Op2Reg].second;
+
+    MIB =
+        BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(M68k::PHI), DestReg)
+            .addReg(Op1Reg)
+            .addMBB(Copy0MBB)
+            .addReg(Op2Reg)
+            .addMBB(ThisMBB);
+
+    // Add this PHI to the rewrite table.
+    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
+  }
+
+  // If we have a cascaded CMOV, the second Jcc provides the same incoming
+  // value as the first Jcc (the True operand of the SELECT_CC/CMOV nodes).
+  if (CascadedCMOV) {
+    MIB.addReg(MI.getOperand(2).getReg()).addMBB(Jcc1MBB);
+    // Copy the PHI result to the register defined by the second CMOV.
+    BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())),
+            DL, TII->get(TargetOpcode::COPY),
+            CascadedCMOV->getOperand(0).getReg())
+        .addReg(MI.getOperand(0).getReg());
+    CascadedCMOV->eraseFromParent();
+  }
+
+  // Now remove the CMOV(s).
+  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd;)
+    (MIIt++)->eraseFromParent();
+
+  return SinkMBB;
+}
+
+/// Custom inserter for the SALLOCA pseudo. Segmented (split) stacks are not
+/// supported on M68k, so reaching this is a hard error.
+MachineBasicBlock *
+M68kTargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
+                                         MachineBasicBlock *BB) const {
+  llvm_unreachable("Cannot lower Segmented Stack Alloca with stack-split on");
+}
+
+/// Dispatch pseudo-instructions flagged with usesCustomInserter to their
+/// target-specific expansion routines.
+MachineBasicBlock *
+M68kTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+                                                MachineBasicBlock *BB) const {
+  switch (MI.getOpcode()) {
+  default:
+    llvm_unreachable("Unexpected instr type to insert");
+  // All CMOV pseudos expand to the branch/PHI diamond.
+  case M68k::CMOV8d:
+  case M68k::CMOV16d:
+  case M68k::CMOV32r:
+    return EmitLoweredSelect(MI, BB);
+  case M68k::SALLOCA:
+    return EmitLoweredSegAlloca(MI, BB);
+  }
+}
+
+/// Lower ISD::VASTART: store the address of the vararg frame-index slot into
+/// the va_list memory location given by operand 1.
+SDValue M68kTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  auto PtrVT = getPointerTy(MF.getDataLayout());
+  M68kMachineFunctionInfo *FuncInfo = MF.getInfo<M68kMachineFunctionInfo>();
+
+  // Operand 2 carries the source Value of the va_list pointer.
+  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+  SDLoc DL(Op);
+
+  // vastart just stores the address of the VarArgsFrameIndex slot into the
+  // memory location argument.
+  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
+  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
+                      MachinePointerInfo(SV));
+}
+
+// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
+// Calls to _alloca are needed to probe the stack when allocating more than 4k
+// bytes in one go. Touching the stack at 4K increments is necessary to ensure
+// that the guard pages used by the OS virtual memory manager are allocated in
+// correct sequence.
+// NOTE(review): the comment above appears to be inherited from the X86
+// implementation; the code below never emits an _alloca call — confirm and
+// update the comment for M68k.
+SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
+                                                    SelectionDAG &DAG) const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  bool SplitStack = MF.shouldSplitStack();
+
+  SDLoc DL(Op);
+
+  // Get the inputs.
+  SDNode *Node = Op.getNode();
+  SDValue Chain = Op.getOperand(0);
+  SDValue Size = Op.getOperand(1);
+  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+  EVT VT = Node->getValueType(0);
+
+  // Chain the dynamic stack allocation so that it doesn't modify the stack
+  // pointer when other instructions are using the stack.
+  Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
+
+  SDValue Result;
+  if (SplitStack) {
+    // Split-stack path: defer to the SEG_ALLOCA pseudo (which is rejected
+    // later by EmitLoweredSegAlloca).
+    auto &MRI = MF.getRegInfo();
+    auto SPTy = getPointerTy(DAG.getDataLayout());
+    auto *ARClass = getRegClassFor(SPTy);
+    unsigned Vreg = MRI.createVirtualRegister(ARClass);
+    Chain = DAG.getCopyToReg(Chain, DL, Vreg, Size);
+    Result = DAG.getNode(M68kISD::SEG_ALLOCA, DL, SPTy, Chain,
+                         DAG.getRegister(Vreg, SPTy));
+  } else {
+    // Common path: SP -= Size, optionally realigned, then write SP back.
+    auto &TLI = DAG.getTargetLoweringInfo();
+    unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
+    assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
+                    " not tell us which reg is the stack pointer!");
+
+    SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
+    Chain = SP.getValue(1);
+    const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
+    unsigned StackAlign = TFI.getStackAlignment();
+    Result = DAG.getNode(ISD::SUB, DL, VT, SP, Size); // Value
+    if (Align > StackAlign)
+      Result = DAG.getNode(ISD::AND, DL, VT, Result,
+                           DAG.getConstant(-(uint64_t)Align, DL, VT));
+    Chain = DAG.getCopyToReg(Chain, DL, SPReg, Result); // Output chain
+  }
+
+  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
+                             DAG.getIntPtrConstant(0, DL, true), SDValue(), DL);
+
+  SDValue Ops[2] = {Result, Chain};
+  return DAG.getMergeValues(Ops, DL);
+}
+
+//===----------------------------------------------------------------------===//
+// DAG Combine
+//===----------------------------------------------------------------------===//
+
+/// Build an M68kISD::SETCC node producing an i8 boolean from condition code
+/// \p Cond and flags value \p CCR.
+static SDValue getSETCC(M68k::CondCode Cond, SDValue CCR, const SDLoc &dl,
+                        SelectionDAG &DAG) {
+  return DAG.getNode(M68kISD::SETCC, dl, MVT::i8,
+                     DAG.getConstant(Cond, dl, MVT::i8), CCR);
+}
+// When legalizing carry, we create carries via add X, -1
+// If that comes from an actual carry, via setcc, we use the
+// carry directly.
+static SDValue combineCarryThroughADD(SDValue CCR) {
+  if (CCR.getOpcode() == M68kISD::ADD) {
+    if (isAllOnesConstant(CCR.getOperand(1))) {
+      SDValue Carry = CCR.getOperand(0);
+      // Skip value-preserving wrappers (truncate/extends, and `and x, 1`
+      // which just re-extracts the boolean) to find the carry producer.
+      while (Carry.getOpcode() == ISD::TRUNCATE ||
+             Carry.getOpcode() == ISD::ZERO_EXTEND ||
+             Carry.getOpcode() == ISD::SIGN_EXTEND ||
+             Carry.getOpcode() == ISD::ANY_EXTEND ||
+             (Carry.getOpcode() == ISD::AND &&
+              isOneConstant(Carry.getOperand(1))))
+        Carry = Carry.getOperand(0);
+      // Only a SETCC/SETCC_CARRY testing the carry condition (COND_CS)
+      // gives us the flags value directly.
+      if (Carry.getOpcode() == M68kISD::SETCC ||
+          Carry.getOpcode() == M68kISD::SETCC_CARRY) {
+        if (Carry.getConstantOperandVal(0) == M68k::COND_CS)
+          return Carry.getOperand(1);
+      }
+    }
+  }
+
+  return SDValue();
+}
+
+/// Optimize a CCR definition used according to the condition code \p CC into
+/// a simpler CCR value, potentially returning a new \p CC and replacing uses
+/// of chain values.
+static SDValue combineSetCCCCR(SDValue CCR, M68k::CondCode &CC,
+                               SelectionDAG &DAG,
+                               const M68kSubtarget &Subtarget) {
+  // Currently only the carry-through-ADD pattern is simplified.
+  if (CC == M68k::COND_CS)
+    if (SDValue Flags = combineCarryThroughADD(CCR))
+      return Flags;
+
+  return SDValue();
+}
+
+// Optimize  RES = M68kISD::SETCC CONDCODE, CCR_INPUT
+static SDValue combineM68kSetCC(SDNode *N, SelectionDAG &DAG,
+                                const M68kSubtarget &Subtarget) {
+  SDLoc DL(N);
+  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(0));
+  SDValue CCR = N->getOperand(1);
+
+  // Try to simplify the CCR and condition code operands.
+  // On success, rebuild the SETCC with the (possibly updated) CC and the
+  // simplified flags value.
+  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget))
+    return getSETCC(CC, Flags, DL, DAG);
+
+  return SDValue();
+}
+// Optimize M68kISD::BRCOND (chain, dest, condcode, CCR) by simplifying the
+// CCR input and condition code.
+static SDValue combineM68kBrCond(SDNode *N, SelectionDAG &DAG,
+                                 const M68kSubtarget &Subtarget) {
+  SDLoc DL(N);
+  M68k::CondCode CC = M68k::CondCode(N->getConstantOperandVal(2));
+  SDValue CCR = N->getOperand(3);
+
+  // Try to simplify the CCR and condition code operands.
+  // Make sure to not keep references to operands, as combineSetCCCCR can
+  // RAUW them under us.
+  if (SDValue Flags = combineSetCCCCR(CCR, CC, DAG, Subtarget)) {
+    SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
+    return DAG.getNode(M68kISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
+                       N->getOperand(1), Cond, Flags);
+  }
+
+  return SDValue();
+}
+
+// Optimize RES, CCR = M68kISD::SUBX LHS, RHS, CCR by feeding the carry
+// producer's flags directly into the SUBX.
+static SDValue combineSUBX(SDNode *N, SelectionDAG &DAG) {
+  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
+    MVT VT = N->getSimpleValueType(0);
+    // NOTE(review): the flags result VT here is i32, while SETCC/lowering
+    // elsewhere in this file use i8 for CCR values — confirm intended.
+    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
+    return DAG.getNode(M68kISD::SUBX, SDLoc(N), VTs, N->getOperand(0),
+                       N->getOperand(1), Flags);
+  }
+
+  return SDValue();
+}
+
+// Optimize RES, CCR = M68kISD::ADDX LHS, RHS, CCR
+// (DCI is currently unused; kept for interface symmetry with other combines.)
+static SDValue combineADDX(SDNode *N, SelectionDAG &DAG,
+                           TargetLowering::DAGCombinerInfo &DCI) {
+  if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
+    MVT VT = N->getSimpleValueType(0);
+    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
+    return DAG.getNode(M68kISD::ADDX, SDLoc(N), VTs, N->getOperand(0),
+                       N->getOperand(1), Flags);
+  }
+
+  return SDValue();
+}
+
+/// Target hook: dispatch M68k-specific DAG combines. Returns a replacement
+/// value, or an empty SDValue when no combine applies.
+SDValue M68kTargetLowering::PerformDAGCombine(SDNode *N,
+                                              DAGCombinerInfo &DCI) const {
+  SelectionDAG &DAG = DCI.DAG;
+  switch (N->getOpcode()) {
+  case M68kISD::SUBX:
+    return combineSUBX(N, DAG);
+  case M68kISD::ADDX:
+    return combineADDX(N, DAG, DCI);
+  case M68kISD::SETCC:
+    return combineM68kSetCC(N, DAG, Subtarget);
+  case M68kISD::BRCOND:
+    return combineM68kBrCond(N, DAG, Subtarget);
+  }
+
+  return SDValue();
+}
+
+//===----------------------------------------------------------------------===//
+// M68kISD Node Names
+//===----------------------------------------------------------------------===//
+
+/// Return a human-readable name for the given M68kISD opcode, or nullptr if
+/// \p Opcode is not a target-specific node (the generic dumper then falls
+/// back to its default handling).
+const char *M68kTargetLowering::getTargetNodeName(unsigned Opcode) const {
+  switch (Opcode) {
+  case M68kISD::CALL:
+    return "M68kISD::CALL";
+  case M68kISD::TAIL_CALL:
+    return "M68kISD::TAIL_CALL";
+  case M68kISD::RET:
+    return "M68kISD::RET";
+  case M68kISD::TC_RETURN:
+    return "M68kISD::TC_RETURN";
+  case M68kISD::ADD:
+    return "M68kISD::ADD";
+  case M68kISD::SUB:
+    return "M68kISD::SUB";
+  case M68kISD::ADDX:
+    return "M68kISD::ADDX";
+  case M68kISD::SUBX:
+    return "M68kISD::SUBX";
+  case M68kISD::SMUL:
+    return "M68kISD::SMUL";
+  case M68kISD::UMUL:
+    return "M68kISD::UMUL";
+  case M68kISD::OR:
+    return "M68kISD::OR";
+  case M68kISD::XOR:
+    return "M68kISD::XOR";
+  case M68kISD::AND:
+    return "M68kISD::AND";
+  case M68kISD::CMP:
+    return "M68kISD::CMP";
+  case M68kISD::BT:
+    return "M68kISD::BT";
+  case M68kISD::SELECT:
+    return "M68kISD::SELECT";
+  case M68kISD::CMOV:
+    return "M68kISD::CMOV";
+  case M68kISD::BRCOND:
+    return "M68kISD::BRCOND";
+  case M68kISD::SETCC:
+    return "M68kISD::SETCC";
+  case M68kISD::SETCC_CARRY:
+    return "M68kISD::SETCC_CARRY";
+  case M68kISD::GLOBAL_BASE_REG:
+    return "M68kISD::GLOBAL_BASE_REG";
+  case M68kISD::Wrapper:
+    return "M68kISD::Wrapper";
+  case M68kISD::WrapperPC:
+    return "M68kISD::WrapperPC";
+  case M68kISD::SEG_ALLOCA:
+    return "M68kISD::SEG_ALLOCA";
+  default:
+    // Use nullptr rather than the C macro NULL, per the LLVM coding
+    // standards (consistent with the rest of this file).
+    return nullptr;
+  }
+}

diff  --git a/llvm/lib/Target/M68k/M68kISelLowering.h b/llvm/lib/Target/M68k/M68kISelLowering.h
new file mode 100644
index 000000000000..004ebe0929fb
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kISelLowering.h
@@ -0,0 +1,265 @@
+//===-- M68kISelLowering.h - M68k DAG Lowering Interface ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the interfaces that M68k uses to lower LLVM code into a
+/// selection DAG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KISELLOWERING_H
+#define LLVM_LIB_TARGET_M68K_M68KISELLOWERING_H
+
+#include "M68k.h"
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/Function.h"
+
+#include <deque>
+
+namespace llvm {
+namespace M68kISD {
+
+/// M68k Specific DAG nodes
+enum NodeType {
+  /// Start the numbering from where ISD NodeType finishes.
+  FIRST_NUMBER = ISD::BUILTIN_OP_END,
+
+  // Call / return control-flow nodes.
+  CALL,
+  RET,
+  TAIL_CALL,
+  TC_RETURN,
+
+  /// M68k compare and logical compare instructions. Subtracts the source
+  /// operand from the destination data register and sets the condition
+  /// codes according to the result. Immediate always goes first.
+  CMP,
+
+  /// M68k bit-test instructions.
+  BT,
+
+  /// M68k Select
+  SELECT,
+
+  /// M68k SetCC. Operand 0 is condition code, and operand 1 is the CCR
+  /// operand, usually produced by a CMP instruction.
+  SETCC,
+
+  // Same as SETCC except it's materialized with a subx and the value is all
+  // one's or all zero's.
+  SETCC_CARRY, // R = carry_bit ? ~0 : 0
+
+  /// M68k conditional moves. Operand 0 and operand 1 are the two values
+  /// to select from. Operand 2 is the condition code, and operand 3 is the
+  /// flag operand produced by a CMP or TEST instruction. It also writes a
+  /// flag result.
+  CMOV,
+
+  /// M68k conditional branches. Operand 0 is the chain operand, operand 1
+  /// is the block to branch if condition is true, operand 2 is the
+  /// condition code, and operand 3 is the flag operand produced by a CMP
+  /// or TEST instruction.
+  BRCOND,
+
+  // Arithmetic operations with CCR results.
+  ADD,
+  SUB,
+  ADDX,
+  SUBX,
+  SMUL,
+  UMUL,
+  OR,
+  XOR,
+  AND,
+
+  // Node materializing the PIC global base register.
+  // GlobalBaseReg,
+  GLOBAL_BASE_REG,
+
+  /// A wrapper node for TargetConstantPool,
+  /// TargetExternalSymbol, and TargetGlobalAddress.
+  Wrapper,
+
+  /// Special wrapper used under M68k PIC mode for PC
+  /// relative displacements.
+  WrapperPC,
+
+  // For allocating variable amounts of stack space when using
+  // segmented stacks. Check if the current stacklet has enough space, and
+  // falls back to heap allocation if not.
+  SEG_ALLOCA,
+};
+} // namespace M68kISD
+
+/// Define some predicates that are used for node matching.
+namespace M68k {
+
+/// Determines whether the callee is required to pop its
+/// own arguments. Callee pop is necessary to support tail calls.
+bool isCalleePop(CallingConv::ID CallingConv, bool IsVarArg, bool GuaranteeTCO);
+
+} // end namespace M68k
+
+//===--------------------------------------------------------------------===//
+// TargetLowering Implementation
+//===--------------------------------------------------------------------===//
+
+class M68kMachineFunctionInfo;
+class M68kSubtarget;
+
+class M68kTargetLowering : public TargetLowering {
+  const M68kSubtarget &Subtarget;
+  const M68kTargetMachine &TM;
+
+public:
+  explicit M68kTargetLowering(const M68kTargetMachine &TM,
+                              const M68kSubtarget &STI);
+
+  static const M68kTargetLowering *create(const M68kTargetMachine &TM,
+                                          const M68kSubtarget &STI);
+
+  const char *getTargetNodeName(unsigned Opcode) const override;
+
+  /// Return the value type to use for ISD::SETCC.
+  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+                         EVT VT) const override;
+
+  /// EVT is not used in-tree, but is used by out-of-tree target.
+  /// Note: 'override' already implies 'virtual' (LLVM coding standards),
+  /// so the keyword is not repeated here.
+  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override;
+
+  /// Provide custom lowering hooks for some operations.
+  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
+  /// Return the entry encoding for a jump table in the current function.
+  /// The returned value is a member of the  MachineJumpTableInfo::JTEntryKind
+  /// enum.
+  unsigned getJumpTableEncoding() const override;
+
+  const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
+                                          const MachineBasicBlock *MBB,
+                                          unsigned uid,
+                                          MCContext &Ctx) const override;
+
+  /// Returns relocation base for the given PIC jumptable.
+  SDValue getPICJumpTableRelocBase(SDValue Table,
+                                   SelectionDAG &DAG) const override;
+
+  /// This returns the relocation base for the given PIC jumptable,
+  /// the same as getPICJumpTableRelocBase, but as an MCExpr.
+  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
+                                             unsigned JTI,
+                                             MCContext &Ctx) const override;
+
+  MachineBasicBlock *
+  EmitInstrWithCustomInserter(MachineInstr &MI,
+                              MachineBasicBlock *MBB) const override;
+
+private:
+  unsigned GetAlignedArgumentStackSize(unsigned StackSize,
+                                       SelectionDAG &DAG) const;
+
+  SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
+
+  /// Emit a load of return address if tail call
+  /// optimization is performed and it is required.
+  SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
+                                  SDValue Chain, bool IsTailCall, int FPDiff,
+                                  const SDLoc &DL) const;
+
+  /// Emit a store of the return address if tail call
+  /// optimization is performed and it is required (FPDiff!=0).
+  SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
+                                   SDValue Chain, SDValue RetAddrFrIdx,
+                                   EVT PtrVT, unsigned SlotSize, int FPDiff,
+                                   const SDLoc &DL) const;
+
+  SDValue LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
+                           const SmallVectorImpl<ISD::InputArg> &ArgInfo,
+                           const SDLoc &DL, SelectionDAG &DAG,
+                           const CCValAssign &VA, MachineFrameInfo &MFI,
+                           unsigned ArgIdx) const;
+
+  SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
+                           const SDLoc &DL, SelectionDAG &DAG,
+                           const CCValAssign &VA, ISD::ArgFlagsTy Flags) const;
+
+  // Per-operation custom lowering helpers dispatched from LowerOperation.
+  SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerToBT(SDValue And, ISD::CondCode CC, const SDLoc &DL,
+                    SelectionDAG &DAG) const;
+  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerGlobalAddress(const GlobalValue *GV, const SDLoc &DL,
+                             int64_t Offset, SelectionDAG &DAG) const;
+  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+
+  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
+                          CallingConv::ID CallConv, bool IsVarArg,
+                          const SmallVectorImpl<ISD::InputArg> &Ins,
+                          const SDLoc &DL, SelectionDAG &DAG,
+                          SmallVectorImpl<SDValue> &InVals) const;
+
+  /// LowerFormalArguments - transform physical registers into virtual
+  /// registers and generate load operations for arguments places on the stack.
+  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CCID,
+                               bool IsVarArg,
+                               const SmallVectorImpl<ISD::InputArg> &Ins,
+                               const SDLoc &DL, SelectionDAG &DAG,
+                               SmallVectorImpl<SDValue> &InVals) const override;
+
+  SDValue LowerCall(CallLoweringInfo &CLI,
+                    SmallVectorImpl<SDValue> &InVals) const override;
+
+  /// Lower the result values of a call into the
+  /// appropriate copies out of appropriate physical registers.
+  SDValue LowerReturn(SDValue Chain, CallingConv::ID CCID, bool IsVarArg,
+                      const SmallVectorImpl<ISD::OutputArg> &Outs,
+                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
+                      SelectionDAG &DAG) const override;
+
+  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
+                              SDValue C) const override;
+
+  MachineBasicBlock *EmitLoweredSelect(MachineInstr &I,
+                                       MachineBasicBlock *MBB) const;
+  MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
+                                          MachineBasicBlock *BB) const;
+
+  /// Emit nodes that will be selected as "test Op0,Op0", or something
+  /// equivalent, for use with the given M68k condition code.
+  SDValue EmitTest(SDValue Op0, unsigned M68kCC, const SDLoc &dl,
+                   SelectionDAG &DAG) const;
+
+  /// Emit nodes that will be selected as "cmp Op0,Op1", or something
+  /// equivalent, for use with the given M68k condition code.
+  SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned M68kCC, const SDLoc &dl,
+                  SelectionDAG &DAG) const;
+
+  /// Check whether the call is eligible for tail call optimization. Targets
+  /// that want to do tail call optimization should implement this function.
+  bool IsEligibleForTailCallOptimization(
+      SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
+      bool IsCalleeStructRet, bool IsCallerStructRet, Type *RetTy,
+      const SmallVectorImpl<ISD::OutputArg> &Outs,
+      const SmallVectorImpl<SDValue> &OutVals,
+      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
+
+  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_M68K_M68KISELLOWERING_H

diff  --git a/llvm/lib/Target/M68k/M68kInstrBuilder.h b/llvm/lib/Target/M68k/M68kInstrBuilder.h
new file mode 100644
index 000000000000..e32b1b047a2b
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kInstrBuilder.h
@@ -0,0 +1,94 @@
+//===-- M68kInstrBuilder.h - Functions to build M68k insts --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file exposes functions that may be used with BuildMI from the
+/// MachineInstrBuilder.h file to handle M68k'isms in a clean way.
+///
+/// TODO The BuildMem function may be used with the BuildMI function to add
+/// entire memory references in a single, typed, function call.  M68k memory
+/// references can be very complex expressions (described in the README), so
+/// wrapping them up behind an easier to use interface makes sense.
+/// Descriptions of the functions are included below.
+///
+/// For reference, the order of operands for memory references is:
+/// (Operand), Base, Scale, Index, Displacement.
+///
+//===----------------------------------------------------------------------===//
+//
+#ifndef LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H
+#define LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/MC/MCInstrDesc.h"
+
+#include <cassert>
+
+namespace llvm {
+namespace M68k {
+/// addOffset - Append a plain immediate displacement operand to \p MIB.
+static inline const MachineInstrBuilder &
+addOffset(const MachineInstrBuilder &MIB, int Offset) {
+  return MIB.addImm(Offset);
+}
+
+/// addRegIndirectWithDisp - This function is used to add a memory reference
+/// of the form (Offset, Base), i.e., one with no scale or index, but with a
+/// displacement. An example is: (4,D0).
+///
+/// Note the operand order: the displacement immediate is added before the
+/// base register.
+static inline const MachineInstrBuilder &
+addRegIndirectWithDisp(const MachineInstrBuilder &MIB, Register Reg,
+                       bool IsKill, int Offset) {
+  return MIB.addImm(Offset).addReg(Reg, getKillRegState(IsKill));
+}
+
+/// addFrameReference - This function is used to add a reference to the base of
+/// an abstract object on the stack frame of the current function.  This
+/// reference has base register as the FrameIndex offset until it is resolved.
+/// This allows a constant offset to be specified as well...
+///
+/// Adds three operands: the displacement, the frame index, and a
+/// MachineMemOperand describing the fixed-stack access.
+static inline const MachineInstrBuilder &
+addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
+  MachineInstr *MI = MIB;
+  MachineFunction &MF = *MI->getParent()->getParent();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const MCInstrDesc &MCID = MI->getDesc();
+  // Derive the memory-operand flags from the instruction description.
+  auto Flags = MachineMemOperand::MONone;
+  if (MCID.mayLoad())
+    Flags |= MachineMemOperand::MOLoad;
+  if (MCID.mayStore())
+    Flags |= MachineMemOperand::MOStore;
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FI, Offset), Flags,
+      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
+  return MIB.addImm(Offset).addFrameIndex(FI).addMemOperand(MMO);
+}
+
+/// addMemOperand - Attach a fixed-stack MachineMemOperand for \p FI to
+/// \p MIB without adding the displacement/frame-index operands themselves
+/// (compare addFrameReference above, which adds both).  The MMO flag
+/// derivation intentionally mirrors addFrameReference.
+static inline const MachineInstrBuilder &
+addMemOperand(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
+  MachineInstr *MI = MIB;
+  MachineFunction &MF = *MI->getParent()->getParent();
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  const MCInstrDesc &MCID = MI->getDesc();
+  auto Flags = MachineMemOperand::MONone;
+  if (MCID.mayLoad())
+    Flags |= MachineMemOperand::MOLoad;
+  if (MCID.mayStore())
+    Flags |= MachineMemOperand::MOStore;
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FI, Offset), Flags,
+      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
+  return MIB.addMemOperand(MMO);
+}
+} // end namespace M68k
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_M68K_M68KINSTRBUILDER_H

diff  --git a/llvm/lib/Target/M68k/M68kInstrInfo.cpp b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
new file mode 100644
index 000000000000..9a1eef404ca4
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.cpp
@@ -0,0 +1,870 @@
+//===-- M68kInstrInfo.cpp - M68k Instruction Information ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k declaration of the TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kInstrInfo.h"
+
+#include "M68kInstrBuilder.h"
+#include "M68kMachineFunction.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+#include <functional>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "M68k-instr-info"
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "M68kGenInstrInfo.inc"
+
+// Pin the vtable to this file.
+void M68kInstrInfo::anchor() {}
+
+M68kInstrInfo::M68kInstrInfo(const M68kSubtarget &STI)
+    : M68kGenInstrInfo(M68k::ADJCALLSTACKDOWN, M68k::ADJCALLSTACKUP, 0,
+                       M68k::RET),
+      Subtarget(STI), RI(STI) {}
+
+/// Map an M68k conditional-branch opcode (the Bcc8 forms) to its condition
+/// code, or COND_INVALID when the opcode is not a conditional branch.
+static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc) {
+  switch (BrOpc) {
+  default:
+    return M68k::COND_INVALID;
+  case M68k::Beq8:
+    return M68k::COND_EQ;
+  case M68k::Bne8:
+    return M68k::COND_NE;
+  case M68k::Blt8:
+    return M68k::COND_LT;
+  case M68k::Ble8:
+    return M68k::COND_LE;
+  case M68k::Bgt8:
+    return M68k::COND_GT;
+  case M68k::Bge8:
+    return M68k::COND_GE;
+  case M68k::Bcs8:
+    return M68k::COND_CS;
+  case M68k::Bls8:
+    return M68k::COND_LS;
+  case M68k::Bhi8:
+    return M68k::COND_HI;
+  case M68k::Bcc8:
+    return M68k::COND_CC;
+  case M68k::Bmi8:
+    return M68k::COND_MI;
+  case M68k::Bpl8:
+    return M68k::COND_PL;
+  case M68k::Bvs8:
+    return M68k::COND_VS;
+  case M68k::Bvc8:
+    return M68k::COND_VC;
+  }
+}
+
+/// Shared worker for analyzeBranch: walks the block's terminators bottom-up,
+/// filling TBB/FBB/Cond.  Returns true when the branch structure cannot be
+/// analyzed.  With AllowModify set it may simplify the block; erasures are
+/// deferred to function exit via a scope guard so iteration stays valid.
+bool M68kInstrInfo::AnalyzeBranchImpl(MachineBasicBlock &MBB,
+                                      MachineBasicBlock *&TBB,
+                                      MachineBasicBlock *&FBB,
+                                      SmallVectorImpl<MachineOperand> &Cond,
+                                      bool AllowModify) const {
+
+  // Last-seen unconditional branch: (position in reverse order, target).
+  auto UncondBranch =
+      std::pair<MachineBasicBlock::reverse_iterator, MachineBasicBlock *>{
+          MBB.rend(), nullptr};
+
+  // Erase any instructions if allowed at the end of the scope.
+  std::vector<std::reference_wrapper<llvm::MachineInstr>> EraseList;
+  auto FinalizeOnReturn = llvm::make_scope_exit([&EraseList] {
+    std::for_each(EraseList.begin(), EraseList.end(),
+                  [](decltype(EraseList)::value_type &ref) {
+                    ref.get().eraseFromParent();
+                  });
+  });
+
+  // Start from the bottom of the block and work up, examining the
+  // terminator instructions.
+  for (auto iter = MBB.rbegin(); iter != MBB.rend(); iter = std::next(iter)) {
+
+    unsigned Opcode = iter->getOpcode();
+
+    if (iter->isDebugInstr())
+      continue;
+
+    // Working from the bottom, when we see a non-terminator instruction, we're
+    // done.
+    if (!isUnpredicatedTerminator(*iter))
+      break;
+
+    // A terminator that isn't a branch can't easily be handled by this
+    // analysis.
+    if (!iter->isBranch())
+      return true;
+
+    // Handle unconditional branches.
+    if (Opcode == M68k::BRA8 || Opcode == M68k::BRA16) {
+      if (!iter->getOperand(0).isMBB())
+        return true;
+      UncondBranch = {iter, iter->getOperand(0).getMBB()};
+
+      // TBB is used to indicate the unconditional destination.
+      TBB = UncondBranch.second;
+
+      if (!AllowModify)
+        continue;
+
+      // If the block has any instructions after a JMP, erase them.
+      EraseList.insert(EraseList.begin(), MBB.rbegin(), iter);
+
+      Cond.clear();
+      FBB = nullptr;
+
+      // Erase the JMP if it's equivalent to a fall-through.
+      if (MBB.isLayoutSuccessor(UncondBranch.second)) {
+        TBB = nullptr;
+        EraseList.push_back(*iter);
+        UncondBranch = {MBB.rend(), nullptr};
+      }
+
+      continue;
+    }
+
+    // Handle conditional branches.
+    // NOTE(review): the file-local helper above is spelled
+    // getCondFromBranchOpc, while this calls M68k::GetCondFromBranchOpc --
+    // presumably a header-declared equivalent; confirm the two agree.
+    auto BranchCode = M68k::GetCondFromBranchOpc(Opcode);
+
+    // Can't handle indirect branch.
+    if (BranchCode == M68k::COND_INVALID)
+      return true;
+
+    // In practice we should never have an undef CCR operand, if we do
+    // abort here as we are not prepared to preserve the flag.
+    // ??? Is this required?
+    // if (iter->getOperand(1).isUndef())
+    //   return true;
+
+    // Working from the bottom, handle the first conditional branch.
+    if (Cond.empty()) {
+      if (!iter->getOperand(0).isMBB())
+        return true;
+      MachineBasicBlock *CondBranchTarget = iter->getOperand(0).getMBB();
+
+      // If we see something like this:
+      //
+      //     bcc l1
+      //     bra l2
+      //     ...
+      //   l1:
+      //     ...
+      //   l2:
+      if (UncondBranch.first != MBB.rend()) {
+
+        assert(std::next(UncondBranch.first) == iter && "Wrong block layout.");
+
+        // And we are allowed to modify the block and the target block of the
+        // conditional branch is the direct successor of this block:
+        //
+        //     bcc l1
+        //     bra l2
+        //   l1:
+        //     ...
+        //   l2:
+        //
+        // we change it to this if allowed:
+        //
+        //     bncc l2
+        //   l1:
+        //     ...
+        //   l2:
+        //
+        // Which is a bit more efficient.
+        if (AllowModify && MBB.isLayoutSuccessor(CondBranchTarget)) {
+
+          BranchCode = GetOppositeBranchCondition(BranchCode);
+          unsigned BNCC = GetCondBranchFromCond(BranchCode);
+
+          BuildMI(MBB, *UncondBranch.first, MBB.rfindDebugLoc(iter), get(BNCC))
+              .addMBB(UncondBranch.second);
+
+          EraseList.push_back(*iter);
+          EraseList.push_back(*UncondBranch.first);
+
+          TBB = UncondBranch.second;
+          FBB = nullptr;
+          Cond.push_back(MachineOperand::CreateImm(BranchCode));
+
+          // Otherwise preserve TBB, FBB and Cond as requested
+        } else {
+          TBB = CondBranchTarget;
+          FBB = UncondBranch.second;
+          Cond.push_back(MachineOperand::CreateImm(BranchCode));
+        }
+
+        UncondBranch = {MBB.rend(), nullptr};
+        continue;
+      }
+
+      // No trailing unconditional branch: conditional branch falls through.
+      TBB = CondBranchTarget;
+      FBB = nullptr;
+      Cond.push_back(MachineOperand::CreateImm(BranchCode));
+
+      continue;
+    }
+
+    // Handle subsequent conditional branches. Only handle the case where all
+    // conditional branches branch to the same destination and their condition
+    // opcodes fit one of the special multi-branch idioms.
+    assert(Cond.size() == 1);
+    assert(TBB);
+
+    // If the conditions are the same, we can leave them alone.
+    auto OldBranchCode = static_cast<M68k::CondCode>(Cond[0].getImm());
+    if (!iter->getOperand(0).isMBB())
+      return true;
+    auto NewTBB = iter->getOperand(0).getMBB();
+    if (OldBranchCode == BranchCode && TBB == NewTBB)
+      continue;
+
+    // If they differ we cannot do much here.
+    return true;
+  }
+
+  return false;
+}
+
+/// TargetInstrInfo hook; simply forwards to AnalyzeBranchImpl above.
+bool M68kInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+                                  MachineBasicBlock *&TBB,
+                                  MachineBasicBlock *&FBB,
+                                  SmallVectorImpl<MachineOperand> &Cond,
+                                  bool AllowModify) const {
+  return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, AllowModify);
+}
+
+/// Erase the branch instructions at the end of \p MBB, stopping at the
+/// first instruction that is neither a branch nor a debug value.  Returns
+/// the number of branches removed.
+unsigned M68kInstrInfo::removeBranch(MachineBasicBlock &MBB,
+                                     int *BytesRemoved) const {
+  assert(!BytesRemoved && "code size not handled");
+
+  MachineBasicBlock::iterator I = MBB.end();
+  unsigned Count = 0;
+
+  while (I != MBB.begin()) {
+    --I;
+    if (I->isDebugValue())
+      continue;
+    if (I->getOpcode() != M68k::BRA8 &&
+        getCondFromBranchOpc(I->getOpcode()) == M68k::COND_INVALID)
+      break;
+    // Remove the branch.
+    I->eraseFromParent();
+    // Restart from the end of the block: erasing invalidated the iterator.
+    I = MBB.end();
+    ++Count;
+  }
+
+  return Count;
+}
+
+/// Insert an unconditional (empty Cond), one-way conditional (null FBB =
+/// fall-through) or two-way conditional branch at the end of \p MBB.
+/// Returns the number of branch instructions inserted.
+unsigned M68kInstrInfo::insertBranch(
+    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
+    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
+  // Shouldn't be a fall through.
+  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
+  assert((Cond.size() == 1 || Cond.size() == 0) &&
+         "M68k branch conditions have one component!");
+  assert(!BytesAdded && "code size not handled");
+
+  if (Cond.empty()) {
+    // Unconditional branch?
+    assert(!FBB && "Unconditional branch with multiple successors!");
+    BuildMI(&MBB, DL, get(M68k::BRA8)).addMBB(TBB);
+    return 1;
+  }
+
+  // If FBB is null, it is implied to be a fall-through block.
+  bool FallThru = FBB == nullptr;
+
+  // Conditional branch.
+  unsigned Count = 0;
+  M68k::CondCode CC = (M68k::CondCode)Cond[0].getImm();
+  unsigned Opc = GetCondBranchFromCond(CC);
+  BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
+  ++Count;
+  if (!FallThru) {
+    // Two-way Conditional branch. Insert the second branch.
+    BuildMI(&MBB, DL, get(M68k::BRA8)).addMBB(FBB);
+    ++Count;
+  }
+  return Count;
+}
+
+/// Insert sign-extension of \p Reg from \p From to \p To before \p I.
+/// An i8 source is first widened with EXT16 (on the i16 subregister when
+/// the destination is i32), then EXT32 finishes the i32 case.
+void M68kInstrInfo::AddSExt(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator I, DebugLoc DL,
+                            unsigned Reg, MVT From, MVT To) const {
+  if (From == MVT::i8) {
+    unsigned R = Reg;
+    // EXT16 requires i16 register
+    if (To == MVT::i32) {
+      R = RI.getSubReg(Reg, M68k::MxSubRegIndex16Lo);
+      assert(R && "No viable SUB register available");
+    }
+    BuildMI(MBB, I, DL, get(M68k::EXT16), R).addReg(R);
+  }
+
+  if (To == MVT::i32)
+    BuildMI(MBB, I, DL, get(M68k::EXT32), Reg).addReg(Reg);
+}
+
+/// Insert zero-extension of \p Reg from \p From to \p To before \p I,
+/// implemented as an AND of the register with the source-width mask.
+void M68kInstrInfo::AddZExt(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator I, DebugLoc DL,
+                            unsigned Reg, MVT From, MVT To) const {
+
+  // Mask keeps only the bits of the narrower source type.
+  unsigned Mask, And;
+  if (From == MVT::i8)
+    Mask = 0xFF;
+  else
+    Mask = 0xFFFF;
+
+  if (To == MVT::i16)
+    And = M68k::AND16di;
+  else // i32
+    And = M68k::AND32di;
+
+  // TODO use xor r,r to decrease size
+  BuildMI(MBB, I, DL, get(And), Reg).addReg(Reg).addImm(Mask);
+}
+
+/// Expand a MOVX_RR pseudo (anyext move between registers of different
+/// sizes) into a plain MOV from the matching-size super source register,
+/// or erase the pseudo entirely when that super register already is the
+/// destination.  Always returns true.
+bool M68kInstrInfo::ExpandMOVX_RR(MachineInstrBuilder &MIB, MVT MVTDst,
+                                  MVT MVTSrc) const {
+  unsigned Move = MVTDst == MVT::i16 ? M68k::MOV16rr : M68k::MOV32rr;
+  unsigned Dst = MIB->getOperand(0).getReg();
+  unsigned Src = MIB->getOperand(1).getReg();
+
+  assert(Dst != Src && "You cannot use the same Regs with MOVX_RR");
+
+  const auto &TRI = getRegisterInfo();
+
+  const auto *RCDst = TRI.getMaximalPhysRegClass(Dst, MVTDst);
+  const auto *RCSrc = TRI.getMaximalPhysRegClass(Src, MVTSrc);
+
+  assert(RCDst && RCSrc && "Wrong use of MOVX_RR");
+  assert(RCDst != RCSrc && "You cannot use the same Reg Classes with MOVX_RR");
+
+  // We need to find the super source register that matches the size of Dst
+  unsigned SSrc = RI.getMatchingMegaReg(Src, RCDst);
+  assert(SSrc && "No viable MEGA register available");
+
+  // (Removed an unused local DebugLoc here; the debug location is carried
+  // by the instruction itself when it is rewritten in place.)
+
+  // If it happens to that super source register is the destination register
+  // we do nothing
+  if (Dst == SSrc) {
+    LLVM_DEBUG(dbgs() << "Remove " << *MIB.getInstr() << '\n');
+    MIB->eraseFromParent();
+  } else { // otherwise we need to MOV
+    LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to MOV\n");
+    MIB->setDesc(get(Move));
+    MIB->getOperand(1).setReg(SSrc);
+  }
+
+  return true;
+}
+
+/// Expand SExt MOVE pseudos into a MOV and an EXT if the operands are two
+/// different registers, or just an EXT if it is the same register.
+bool M68kInstrInfo::ExpandMOVSZX_RR(MachineInstrBuilder &MIB, bool IsSigned,
+                                    MVT MVTDst, MVT MVTSrc) const {
+  LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to ");
+
+  unsigned Move;
+
+  if (MVTDst == MVT::i16)
+    Move = M68k::MOV16rr;
+  else // i32
+    Move = M68k::MOV32rr;
+
+  unsigned Dst = MIB->getOperand(0).getReg();
+  unsigned Src = MIB->getOperand(1).getReg();
+
+  assert(Dst != Src && "You cannot use the same Regs with MOVSX_RR");
+
+  const auto &TRI = getRegisterInfo();
+
+  const auto *RCDst = TRI.getMaximalPhysRegClass(Dst, MVTDst);
+  const auto *RCSrc = TRI.getMaximalPhysRegClass(Src, MVTSrc);
+
+  assert(RCDst && RCSrc && "Wrong use of MOVSX_RR");
+  assert(RCDst != RCSrc && "You cannot use the same Reg Classes with MOVSX_RR");
+
+  // We need to find the super source register that matches the size of Dst
+  unsigned SSrc = RI.getMatchingMegaReg(Src, RCDst);
+  assert(SSrc && "No viable MEGA register available");
+
+  MachineBasicBlock &MBB = *MIB->getParent();
+  DebugLoc DL = MIB->getDebugLoc();
+
+  // Copy first when the (widened) source differs from the destination.
+  if (Dst != SSrc) {
+    LLVM_DEBUG(dbgs() << "Move and " << '\n');
+    BuildMI(MBB, MIB.getInstr(), DL, get(Move), Dst).addReg(SSrc);
+  }
+
+  if (IsSigned) {
+    LLVM_DEBUG(dbgs() << "Sign Extend" << '\n');
+    AddSExt(MBB, MIB.getInstr(), DL, Dst, MVTSrc, MVTDst);
+  } else {
+    LLVM_DEBUG(dbgs() << "Zero Extend" << '\n');
+    AddZExt(MBB, MIB.getInstr(), DL, Dst, MVTSrc, MVTDst);
+  }
+
+  MIB->eraseFromParent();
+
+  return true;
+}
+
+/// Expand a sign/zero-extending load pseudo into a plain narrow load into
+/// the subregister of Dst, followed by an in-register extension of Dst.
+bool M68kInstrInfo::ExpandMOVSZX_RM(MachineInstrBuilder &MIB, bool IsSigned,
+                                    const MCInstrDesc &Desc, MVT MVTDst,
+                                    MVT MVTSrc) const {
+  LLVM_DEBUG(dbgs() << "Expand " << *MIB.getInstr() << " to LOAD and ");
+
+  unsigned Dst = MIB->getOperand(0).getReg();
+
+  // We need the subreg of Dst to make instruction verifier happy because the
+  // real machine instruction consumes and produces values of the same size and
+  // the registers that will be used here fall into different classes and this
+  // makes the instruction verifier cry. We could of course use a bigger
+  // operation, but this would put some pressure on cache and memory, so no.
+  unsigned SubDst =
+      RI.getSubReg(Dst, MVTSrc == MVT::i8 ? M68k::MxSubRegIndex8Lo
+                                          : M68k::MxSubRegIndex16Lo);
+  assert(SubDst && "No viable SUB register available");
+
+  // Make this a plain move
+  MIB->setDesc(Desc);
+  MIB->getOperand(0).setReg(SubDst);
+
+  // Insert the extension right after the (rewritten) load.
+  MachineBasicBlock::iterator I = MIB.getInstr();
+  I++;
+  MachineBasicBlock &MBB = *MIB->getParent();
+  DebugLoc DL = MIB->getDebugLoc();
+
+  if (IsSigned) {
+    LLVM_DEBUG(dbgs() << "Sign Extend" << '\n');
+    AddSExt(MBB, I, DL, Dst, MVTSrc, MVTDst);
+  } else {
+    LLVM_DEBUG(dbgs() << "Zero Extend" << '\n');
+    AddZExt(MBB, I, DL, Dst, MVTSrc, MVTDst);
+  }
+
+  return true;
+}
+
+/// Expand a PUSH/POP pseudo into the real stack-pointer-relative
+/// instruction \p Desc (push: SP then operand; pop: operand def then SP),
+/// then erase the pseudo.  Always returns true.
+bool M68kInstrInfo::ExpandPUSH_POP(MachineInstrBuilder &MIB,
+                                   const MCInstrDesc &Desc, bool IsPush) const {
+  MachineBasicBlock::iterator I = MIB.getInstr();
+  I++;
+  MachineBasicBlock &MBB = *MIB->getParent();
+  // Copy the operand before the pseudo is erased below.
+  MachineOperand MO = MIB->getOperand(0);
+  DebugLoc DL = MIB->getDebugLoc();
+  if (IsPush)
+    BuildMI(MBB, I, DL, Desc).addReg(RI.getStackRegister()).add(MO);
+  else
+    BuildMI(MBB, I, DL, Desc, MO.getReg()).addReg(RI.getStackRegister());
+
+  MIB->eraseFromParent();
+  return true;
+}
+
+/// Expand a CCR move pseudo into MOV16cd (to CCR) or MOV16dc (from CCR),
+/// promoting the data-register operand to its 16-bit super register.
+bool M68kInstrInfo::ExpandCCR(MachineInstrBuilder &MIB, bool IsToCCR) const {
+
+  // Replace the pseudo instruction with the real one
+  if (IsToCCR)
+    MIB->setDesc(get(M68k::MOV16cd));
+  else
+    // FIXME M68010 or later is required
+    MIB->setDesc(get(M68k::MOV16dc));
+
+  // Promote used register to the next class
+  auto &Opd = MIB->getOperand(1);
+  Opd.setReg(getRegisterInfo().getMatchingSuperReg(
+      Opd.getReg(), M68k::MxSubRegIndex8Lo, &M68k::DR16RegClass));
+
+  return true;
+}
+
+/// Expand a single-register MOVEM pseudo into the real MOVEM instruction
+/// \p Desc with a one-bit register mask.  \p IsRM selects the
+/// register<-memory (load) direction; sub-32-bit registers are implicitly
+/// promoted to their 32-bit super register before the mask is computed.
+bool M68kInstrInfo::ExpandMOVEM(MachineInstrBuilder &MIB,
+                                const MCInstrDesc &Desc, bool IsRM) const {
+  int Reg = 0, Offset = 0, Base = 0;
+  auto XR32 = RI.getRegClass(M68k::XR32RegClassID);
+  auto DL = MIB->getDebugLoc();
+  auto MI = MIB.getInstr();
+  auto &MBB = *MIB->getParent();
+
+  // Operand order differs between the load and store forms of the pseudo.
+  if (IsRM) {
+    Reg = MIB->getOperand(0).getReg();
+    Offset = MIB->getOperand(1).getImm();
+    Base = MIB->getOperand(2).getReg();
+  } else {
+    Offset = MIB->getOperand(0).getImm();
+    Base = MIB->getOperand(1).getReg();
+    Reg = MIB->getOperand(2).getReg();
+  }
+
+  // If the register is not in XR32 then it is smaller than 32 bit, we
+  // implicitly promote it to 32
+  if (!XR32->contains(Reg)) {
+    Reg = RI.getMatchingMegaReg(Reg, XR32);
+    assert(Reg && "Has not meaningful MEGA register");
+  }
+
+  unsigned Mask = 1 << RI.getSpillRegisterOrder(Reg);
+  if (IsRM) {
+    BuildMI(MBB, MI, DL, Desc)
+        .addImm(Mask)
+        .addImm(Offset)
+        .addReg(Base)
+        .addReg(Reg, RegState::ImplicitDefine)
+        .copyImplicitOps(*MIB);
+  } else {
+    BuildMI(MBB, MI, DL, Desc)
+        .addImm(Offset)
+        .addReg(Base)
+        .addImm(Mask)
+        .addReg(Reg, RegState::Implicit)
+        .copyImplicitOps(*MIB);
+  }
+
+  MIB->eraseFromParent();
+
+  return true;
+}
+
+/// Expand a single-def pseudo instruction to a two-addr
+/// instruction with two undef reads of the register being defined.
+/// This is used for mapping:
+///   %d0 = SETCS_C32d
+/// to:
+///   %d0 = SUBX32dd %d0<undef>, %d0<undef>
+/// The undef sources are harmless because the result presumably depends
+/// only on the extend/carry flag consumed by SUBX — confirm against the
+/// SUBX instruction semantics.
+///
+static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
+                             const MCInstrDesc &Desc) {
+  assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
+  unsigned Reg = MIB->getOperand(0).getReg();
+  MIB->setDesc(Desc);
+
+  // MachineInstr::addOperand() will insert explicit operands before any
+  // implicit operands.
+  MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
+  // But we don't trust that.
+  assert(MIB->getOperand(1).getReg() == Reg &&
+         MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
+  return true;
+}
+
+/// Expand pseudo instructions that survive until after register allocation.
+/// Returns true iff \p MI was one of the handled pseudos and was replaced.
+bool M68kInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+  MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
+  switch (MI.getOpcode()) {
+  // PUSH/POP pseudos become moves through the stack register.
+  case M68k::PUSH8d:
+    return ExpandPUSH_POP(MIB, get(M68k::MOV8ed), true);
+  case M68k::PUSH16d:
+    return ExpandPUSH_POP(MIB, get(M68k::MOV16er), true);
+  case M68k::PUSH32r:
+    return ExpandPUSH_POP(MIB, get(M68k::MOV32er), true);
+
+  case M68k::POP8d:
+    return ExpandPUSH_POP(MIB, get(M68k::MOV8do), false);
+  case M68k::POP16d:
+    return ExpandPUSH_POP(MIB, get(M68k::MOV16ro), false);
+  case M68k::POP32r:
+    return ExpandPUSH_POP(MIB, get(M68k::MOV32ro), false);
+
+  // SETCS_C* pseudos become SUBX with undef sources (see Expand2AddrUndef).
+  case M68k::SETCS_C8d:
+    return Expand2AddrUndef(MIB, get(M68k::SUBX8dd));
+  case M68k::SETCS_C16d:
+    return Expand2AddrUndef(MIB, get(M68k::SUBX16dd));
+  case M68k::SETCS_C32d:
+    return Expand2AddrUndef(MIB, get(M68k::SUBX32dd));
+  }
+  return false;
+}
+
+/// Decide whether a register operand is legal in a PC-relative context by
+/// matching the instruction's first EA-encoding beads against the bit
+/// pattern of addressing mode k.
+bool M68kInstrInfo::isPCRelRegisterOperandLegal(
+    const MachineOperand &MO) const {
+  assert(MO.isReg());
+  const auto *MI = MO.getParent();
+  const uint8_t *Beads = M68k::getMCInstrBeads(MI->getOpcode());
+  assert(*Beads);
+
+  // Only addressing mode k has (non-pc) register with PCRel
+  // So we're looking for EA Beads equal to
+  // `3Bits<011>_1Bit<1>_2Bits<11>`
+  // FIXME: There is an important caveat and two assumptions
+  // here: The caveat is that EA encoding always sit on the LSB.
+  // Where the assumptions are that if there are more than one
+  // operands, the EA encoding for the source operand always sit
+  // on the LSB. At the same time, k addressing mode can not be used
+  // on destination operand.
+  // The last assumption is kinda dirty so we need to find a way around
+  // it
+  const uint8_t EncEAk[3] = {0b011, 0b1, 0b11};
+  for (const uint8_t Pat : EncEAk) {
+    uint8_t Bead = *(Beads++);
+    // A zero bead means the bead list ended before the pattern matched.
+    if (!Bead)
+      return false;
+
+    switch (Bead & 0xF) {
+    default:
+      return false;
+    case M68kBeads::Bits1:
+    case M68kBeads::Bits2:
+    case M68kBeads::Bits3: {
+      // The bead payload lives in the high nibble.
+      uint8_t Val = (Bead & 0xF0) >> 4;
+      if (Val != Pat)
+        return false;
+    }
+    }
+  }
+  return true;
+}
+
+/// Emit instruction(s) copying physical register \p SrcReg to \p DstReg.
+/// Handles same-size copies, widening copies between differently sized
+/// classes, and copies to/from CCR (via a data register). Copies involving
+/// SR, or any other unsupported pair, are fatal.
+void M68kInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+                                MachineBasicBlock::iterator MI,
+                                const DebugLoc &DL, MCRegister DstReg,
+                                MCRegister SrcReg, bool KillSrc) const {
+  unsigned Opc = 0;
+
+  // First deal with the normal symmetric copies.
+  if (M68k::XR32RegClass.contains(DstReg, SrcReg))
+    Opc = M68k::MOV32rr;
+  else if (M68k::XR16RegClass.contains(DstReg, SrcReg))
+    Opc = M68k::MOV16rr;
+  else if (M68k::DR8RegClass.contains(DstReg, SrcReg))
+    Opc = M68k::MOV8dd;
+
+  if (Opc) {
+    BuildMI(MBB, MI, DL, get(Opc), DstReg)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+
+  // Now deal with asymmetrically sized copies. The cases that follow are upcast
+  // moves.
+  //
+  // NOTE
+  // These moves are not aware of type nature of these values and thus
+  // won't do any SExt or ZExt and upper bits will basically contain garbage.
+  if (M68k::DR8RegClass.contains(SrcReg)) {
+    if (M68k::XR16RegClass.contains(DstReg))
+      Opc = M68k::MOVXd16d8;
+    else if (M68k::XR32RegClass.contains(DstReg))
+      Opc = M68k::MOVXd32d8;
+  } else if (M68k::XR16RegClass.contains(SrcReg) &&
+             M68k::XR32RegClass.contains(DstReg))
+    Opc = M68k::MOVXd32d16;
+
+  if (Opc) {
+    BuildMI(MBB, MI, DL, get(Opc), DstReg)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+
+  // CCR can only be copied through an 8-bit data register.
+  bool FromCCR = SrcReg == M68k::CCR;
+  bool FromSR = SrcReg == M68k::SR;
+  bool ToCCR = DstReg == M68k::CCR;
+  bool ToSR = DstReg == M68k::SR;
+
+  if (FromCCR) {
+    assert(M68k::DR8RegClass.contains(DstReg) &&
+           "Need DR8 register to copy CCR");
+    Opc = M68k::MOV8dc;
+  } else if (ToCCR) {
+    assert(M68k::DR8RegClass.contains(SrcReg) &&
+           "Need DR8 register to copy CCR");
+    Opc = M68k::MOV8cd;
+  } else if (FromSR || ToSR)
+    llvm_unreachable("Cannot emit SR copy instruction");
+
+  if (Opc) {
+    BuildMI(MBB, MI, DL, get(Opc), DstReg)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+
+  LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
+                    << RI.getName(DstReg) << '\n');
+  llvm_unreachable("Cannot emit physreg copy instruction");
+}
+
+namespace {
+/// Pick the opcode used to spill (store) or reload (load) a register of
+/// class \p RC. Spill slots are accessed through the MOVEM-based pseudos;
+/// the 1-byte CCR class is transferred with MOV16 instead.
+unsigned getLoadStoreRegOpcode(unsigned Reg, const TargetRegisterClass *RC,
+                               const TargetRegisterInfo *TRI,
+                               const M68kSubtarget &STI, bool load) {
+  switch (TRI->getRegSizeInBits(*RC)) {
+  default:
+    llvm_unreachable("Unknown spill size");
+  case 8:
+    if (M68k::DR8RegClass.hasSubClassEq(RC))
+      return load ? M68k::MOVM8mp_P : M68k::MOVM8pm_P;
+    if (M68k::CCRCRegClass.hasSubClassEq(RC))
+      return load ? M68k::MOV16cp : M68k::MOV16pc;
+
+    llvm_unreachable("Unknown 1-byte regclass");
+  case 16:
+    assert(M68k::XR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
+    return load ? M68k::MOVM16mp_P : M68k::MOVM16pm_P;
+  case 32:
+    assert(M68k::XR32RegClass.hasSubClassEq(RC) && "Unknown 4-byte regclass");
+    return load ? M68k::MOVM32mp_P : M68k::MOVM32pm_P;
+  }
+}
+
+/// Convenience wrapper: opcode for storing \p SrcReg to a stack slot.
+unsigned getStoreRegOpcode(unsigned SrcReg, const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI,
+                           const M68kSubtarget &STI) {
+  return getLoadStoreRegOpcode(SrcReg, RC, TRI, STI, false);
+}
+
+/// Convenience wrapper: opcode for loading \p DstReg from a stack slot.
+unsigned getLoadRegOpcode(unsigned DstReg, const TargetRegisterClass *RC,
+                          const TargetRegisterInfo *TRI,
+                          const M68kSubtarget &STI) {
+  return getLoadStoreRegOpcode(DstReg, RC, TRI, STI, true);
+}
+} // end anonymous namespace
+
+/// Report the size and offset used for spill slots. Every slot is forced
+/// to the maximum (4-byte) size regardless of \p RC or \p SubIdx.
+bool M68kInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
+                                      unsigned SubIdx, unsigned &Size,
+                                      unsigned &Offset,
+                                      const MachineFunction &MF) const {
+  // The slot size must be the maximum size so we can easily use MOVEM.L
+  Size = 4;
+  Offset = 0;
+  return true;
+}
+
+/// Store \p SrcReg to stack slot \p FrameIndex using the MOVEM-based
+/// store opcode selected for its register class.
+void M68kInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+                                        MachineBasicBlock::iterator MI,
+                                        Register SrcReg, bool IsKill,
+                                        int FrameIndex,
+                                        const TargetRegisterClass *RC,
+                                        const TargetRegisterInfo *TRI) const {
+  const MachineFunction &MF = *MBB.getParent();
+  // Slots are always 4 bytes wide (see getStackSlotRange).
+  assert(MF.getFrameInfo().getObjectSize(FrameIndex) == 4 &&
+         "Stack slot too small for store");
+  unsigned Opc = getStoreRegOpcode(SrcReg, RC, TRI, Subtarget);
+  DebugLoc DL = MBB.findDebugLoc(MI);
+  // (0,FrameIndex) <- $reg
+  M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIndex)
+      .addReg(SrcReg, getKillRegState(IsKill));
+}
+
+/// Reload \p DstReg from stack slot \p FrameIndex using the MOVEM-based
+/// load opcode selected for its register class.
+void M68kInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                         MachineBasicBlock::iterator MI,
+                                         Register DstReg, int FrameIndex,
+                                         const TargetRegisterClass *RC,
+                                         const TargetRegisterInfo *TRI) const {
+  const MachineFunction &MF = *MBB.getParent();
+  // Slots are always 4 bytes wide (see getStackSlotRange).
+  assert(MF.getFrameInfo().getObjectSize(FrameIndex) == 4 &&
+         "Stack slot too small for load");
+  unsigned Opc = getLoadRegOpcode(DstReg, RC, TRI, Subtarget);
+  DebugLoc DL = MBB.findDebugLoc(MI);
+  // $reg <- (0,FrameIndex)
+  M68k::addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DstReg), FrameIndex);
+}
+
+/// Return a virtual register initialized with the global base register
+/// value. Output instructions required to initialize the register in the
+/// function entry block, if necessary.
+///
+/// TODO Move this function to M68kMachineFunctionInfo.
+unsigned M68kInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
+  M68kMachineFunctionInfo *MxFI = MF->getInfo<M68kMachineFunctionInfo>();
+  unsigned GlobalBaseReg = MxFI->getGlobalBaseReg();
+  // Reuse the register if this function already allocated one.
+  if (GlobalBaseReg != 0)
+    return GlobalBaseReg;
+
+  // Create the register. The code to initialize it is inserted later,
+  // by the CGBR pass (below).
+  //
+  // NOTE
+  // Normally M68k uses A5 register as global base pointer but this will
+  // create unnecessary spill if we use less than 4 registers in code; since A5
+  // is callee-save anyway we could try to allocate caller-save first and if
+  // lucky get one, otherwise it does not really matter which callee-save to
+  // use.
+  MachineRegisterInfo &RegInfo = MF->getRegInfo();
+  GlobalBaseReg = RegInfo.createVirtualRegister(&M68k::AR32_NOSPRegClass);
+  MxFI->setGlobalBaseReg(GlobalBaseReg);
+  return GlobalBaseReg;
+}
+
+/// Decompose an operand's target flags. M68k keeps the whole flag in the
+/// direct component; the bitmask component is always zero.
+std::pair<unsigned, unsigned>
+M68kInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+  return std::make_pair(TF, 0u);
+}
+
+/// Names for the direct target flags, used when (de)serializing MIR.
+ArrayRef<std::pair<unsigned, const char *>>
+M68kInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+  using namespace M68kII;
+  static const std::pair<unsigned, const char *> TargetFlags[] = {
+      {MO_ABSOLUTE_ADDRESS, "m68k-absolute"},
+      {MO_PC_RELATIVE_ADDRESS, "m68k-pcrel"},
+      {MO_GOT, "m68k-got"},
+      {MO_GOTOFF, "m68k-gotoff"},
+      {MO_GOTPCREL, "m68k-gotpcrel"},
+      {MO_PLT, "m68k-plt"}};
+  return makeArrayRef(TargetFlags);
+}
+
+namespace {
+/// Create Global Base Reg pass. This initializes the PIC global base register
+struct CGBR : public MachineFunctionPass {
+  static char ID;
+  CGBR() : MachineFunctionPass(ID) {}
+
+  bool runOnMachineFunction(MachineFunction &MF) override {
+    const M68kSubtarget &STI = MF.getSubtarget<M68kSubtarget>();
+    M68kMachineFunctionInfo *MxFI = MF.getInfo<M68kMachineFunctionInfo>();
+
+    unsigned GlobalBaseReg = MxFI->getGlobalBaseReg();
+
+    // If we didn't need a GlobalBaseReg, don't insert code.
+    if (GlobalBaseReg == 0)
+      return false;
+
+    // Insert the set of GlobalBaseReg into the first MBB of the function
+    MachineBasicBlock &FirstMBB = MF.front();
+    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+    DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
+    const M68kInstrInfo *TII = STI.getInstrInfo();
+
+    // Generate lea (_GLOBAL_OFFSET_TABLE_,%PC), %GlobalBaseReg
+    BuildMI(FirstMBB, MBBI, DL, TII->get(M68k::LEA32q), GlobalBaseReg)
+        .addExternalSymbol("_GLOBAL_OFFSET_TABLE_", M68kII::MO_GOTPCREL);
+
+    return true;
+  }
+
+  StringRef getPassName() const override {
+    return "M68k PIC Global Base Reg Initialization";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+};
+} // namespace
+
+// Pass identification.
+char CGBR::ID = 0;
+/// Factory for the PIC global base register initialization pass.
+FunctionPass *llvm::createM68kGlobalBaseRegPass() { return new CGBR(); }

diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.h b/llvm/lib/Target/M68k/M68kInstrInfo.h
new file mode 100644
index 000000000000..e15b0a1181ba
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.h
@@ -0,0 +1,342 @@
+//===-- M68kInstrInfo.h - M68k Instruction Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k implementation of the TargetInstrInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KINSTRINFO_H
+#define LLVM_LIB_TARGET_M68K_M68KINSTRINFO_H
+
+#include "M68k.h"
+#include "M68kRegisterInfo.h"
+
+#include "MCTargetDesc/M68kBaseInfo.h"
+
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "M68kGenInstrInfo.inc"
+
+namespace llvm {
+
+class M68kSubtarget;
+
+namespace M68k {
+// Forward declaration
+const uint8_t *getMCInstrBeads(unsigned Opcode);
+
+// These MUST be kept in sync with codes definitions in M68kInstrInfo.td
+enum CondCode {
+  COND_T = 0,   // True
+  COND_F = 1,   // False
+  COND_HI = 2,  // High
+  COND_LS = 3,  // Less or Same
+  COND_CC = 4,  // Carry Clear
+  COND_CS = 5,  // Carry Set
+  COND_NE = 6,  // Not Equal
+  COND_EQ = 7,  // Equal
+  COND_VC = 8,  // Overflow Clear
+  COND_VS = 9,  // Overflow Set
+  COND_PL = 10, // Plus
+  COND_MI = 11, // Minus
+  COND_GE = 12, // Greater or Equal
+  COND_LT = 13, // Less Than
+  COND_GT = 14, // Greater Than
+  COND_LE = 15, // Less or Equal
+  LAST_VALID_COND = COND_LE,
+  // Sentinel for "not a conditional branch" (see GetCondFromBranchOpc).
+  COND_INVALID
+};
+
+// FIXME: it would be nice if TableGen generated these predicates and
+// converters, perhaps driven by instruction tags.
+
+/// Return the condition code that tests the opposite predicate of \p CC.
+static inline M68k::CondCode GetOppositeBranchCondition(M68k::CondCode CC) {
+  switch (CC) {
+  default:
+    llvm_unreachable("Illegal condition code!");
+  case M68k::COND_T:
+    return M68k::COND_F;
+  case M68k::COND_F:
+    return M68k::COND_T;
+  case M68k::COND_HI:
+    return M68k::COND_LS;
+  case M68k::COND_LS:
+    return M68k::COND_HI;
+  case M68k::COND_CC:
+    return M68k::COND_CS;
+  case M68k::COND_CS:
+    return M68k::COND_CC;
+  case M68k::COND_NE:
+    return M68k::COND_EQ;
+  case M68k::COND_EQ:
+    return M68k::COND_NE;
+  case M68k::COND_VC:
+    return M68k::COND_VS;
+  case M68k::COND_VS:
+    return M68k::COND_VC;
+  case M68k::COND_PL:
+    return M68k::COND_MI;
+  case M68k::COND_MI:
+    return M68k::COND_PL;
+  case M68k::COND_GE:
+    return M68k::COND_LT;
+  case M68k::COND_LT:
+    return M68k::COND_GE;
+  case M68k::COND_GT:
+    return M68k::COND_LE;
+  case M68k::COND_LE:
+    return M68k::COND_GT;
+  }
+}
+
+/// Map a condition code to the corresponding conditional branch opcode
+/// (the B*8 forms). COND_T/COND_F have no branch form and are fatal here.
+static inline unsigned GetCondBranchFromCond(M68k::CondCode CC) {
+  switch (CC) {
+  default:
+    llvm_unreachable("Illegal condition code!");
+  case M68k::COND_EQ:
+    return M68k::Beq8;
+  case M68k::COND_NE:
+    return M68k::Bne8;
+  case M68k::COND_LT:
+    return M68k::Blt8;
+  case M68k::COND_LE:
+    return M68k::Ble8;
+  case M68k::COND_GT:
+    return M68k::Bgt8;
+  case M68k::COND_GE:
+    return M68k::Bge8;
+  case M68k::COND_CS:
+    return M68k::Bcs8;
+  case M68k::COND_LS:
+    return M68k::Bls8;
+  case M68k::COND_HI:
+    return M68k::Bhi8;
+  case M68k::COND_CC:
+    return M68k::Bcc8;
+  case M68k::COND_MI:
+    return M68k::Bmi8;
+  case M68k::COND_PL:
+    return M68k::Bpl8;
+  case M68k::COND_VS:
+    return M68k::Bvs8;
+  case M68k::COND_VC:
+    return M68k::Bvc8;
+  }
+}
+
+/// Inverse of GetCondBranchFromCond: recover the condition code from a
+/// conditional branch opcode, or COND_INVALID if \p Opcode is not one.
+static inline M68k::CondCode GetCondFromBranchOpc(unsigned Opcode) {
+  switch (Opcode) {
+  default:
+    return M68k::COND_INVALID;
+  case M68k::Beq8:
+    return M68k::COND_EQ;
+  case M68k::Bne8:
+    return M68k::COND_NE;
+  case M68k::Blt8:
+    return M68k::COND_LT;
+  case M68k::Ble8:
+    return M68k::COND_LE;
+  case M68k::Bgt8:
+    return M68k::COND_GT;
+  case M68k::Bge8:
+    return M68k::COND_GE;
+  case M68k::Bcs8:
+    return M68k::COND_CS;
+  case M68k::Bls8:
+    return M68k::COND_LS;
+  case M68k::Bhi8:
+    return M68k::COND_HI;
+  case M68k::Bcc8:
+    return M68k::COND_CC;
+  case M68k::Bmi8:
+    return M68k::COND_MI;
+  case M68k::Bpl8:
+    return M68k::COND_PL;
+  case M68k::Bvs8:
+    return M68k::COND_VS;
+  case M68k::Bvc8:
+    return M68k::COND_VC;
+  }
+}
+
+/// Return true if \p Op is one of the recognized integer CMP opcodes.
+/// NOTE(review): only the 8- and 16-bit CMP forms are listed here; the
+/// 32-bit forms are absent — confirm whether that is intentional.
+static inline bool IsCMP(unsigned Op) {
+  switch (Op) {
+  default:
+    return false;
+  case M68k::CMP8dd:
+  case M68k::CMP8df:
+  case M68k::CMP8di:
+  case M68k::CMP8dj:
+  case M68k::CMP8dp:
+  case M68k::CMP16dd:
+  case M68k::CMP16df:
+  case M68k::CMP16di:
+  case M68k::CMP16dj:
+  case M68k::CMP16dp:
+    return true;
+  }
+}
+
+/// Return true if \p SETCC is one of the SET-on-condition opcodes
+/// (data-register, address-indirect and displacement forms).
+static inline bool IsSETCC(unsigned SETCC) {
+  switch (SETCC) {
+  default:
+    return false;
+  case M68k::SETd8eq:
+  case M68k::SETd8ne:
+  case M68k::SETd8lt:
+  case M68k::SETd8ge:
+  case M68k::SETd8le:
+  case M68k::SETd8gt:
+  case M68k::SETd8cs:
+  case M68k::SETd8cc:
+  case M68k::SETd8ls:
+  case M68k::SETd8hi:
+  case M68k::SETd8pl:
+  case M68k::SETd8mi:
+  case M68k::SETd8vc:
+  case M68k::SETd8vs:
+  case M68k::SETj8eq:
+  case M68k::SETj8ne:
+  case M68k::SETj8lt:
+  case M68k::SETj8ge:
+  case M68k::SETj8le:
+  case M68k::SETj8gt:
+  case M68k::SETj8cs:
+  case M68k::SETj8cc:
+  case M68k::SETj8ls:
+  case M68k::SETj8hi:
+  case M68k::SETj8pl:
+  case M68k::SETj8mi:
+  case M68k::SETj8vc:
+  case M68k::SETj8vs:
+  case M68k::SETp8eq:
+  case M68k::SETp8ne:
+  case M68k::SETp8lt:
+  case M68k::SETp8ge:
+  case M68k::SETp8le:
+  case M68k::SETp8gt:
+  case M68k::SETp8cs:
+  case M68k::SETp8cc:
+  case M68k::SETp8ls:
+  case M68k::SETp8hi:
+  case M68k::SETp8pl:
+  case M68k::SETp8mi:
+  case M68k::SETp8vc:
+  case M68k::SETp8vs:
+    return true;
+  }
+}
+
+} // namespace M68k
+
+/// M68k implementation of TargetInstrInfo.
+class M68kInstrInfo : public M68kGenInstrInfo {
+  virtual void anchor();
+
+protected:
+  const M68kSubtarget &Subtarget;
+  const M68kRegisterInfo RI;
+
+public:
+  explicit M68kInstrInfo(const M68kSubtarget &STI);
+
+  static const M68kInstrInfo *create(M68kSubtarget &STI);
+
+  /// TargetInstrInfo is a superset of MRegister info. As such, whenever a
+  /// client has an instance of instruction info, it should always be able to
+  /// get register info as well (through this method).
+  const M68kRegisterInfo &getRegisterInfo() const { return RI; };
+
+  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                     MachineBasicBlock *&FBB,
+                     SmallVectorImpl<MachineOperand> &Cond,
+                     bool AllowModify) const override;
+
+  bool AnalyzeBranchImpl(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                         MachineBasicBlock *&FBB,
+                         SmallVectorImpl<MachineOperand> &Cond,
+                         bool AllowModify) const;
+
+  unsigned removeBranch(MachineBasicBlock &MBB,
+                        int *BytesRemoved = nullptr) const override;
+
+  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+                        const DebugLoc &DL,
+                        int *BytesAdded = nullptr) const override;
+
+  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
+                   bool KillSrc) const override;
+
+  bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
+                         unsigned &Size, unsigned &Offset,
+                         const MachineFunction &MF) const override;
+
+  void storeRegToStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI, Register SrcReg,
+                           bool IsKill, int FrameIndex,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI) const override;
+
+  void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI, Register DestReg,
+                            int FrameIndex, const TargetRegisterClass *RC,
+                            const TargetRegisterInfo *TRI) const override;
+
+  bool expandPostRAPseudo(MachineInstr &MI) const override;
+
+  bool isPCRelRegisterOperandLegal(const MachineOperand &MO) const override;
+
+  /// Add appropriate SExt nodes
+  void AddSExt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+               DebugLoc DL, unsigned Reg, MVT From, MVT To) const;
+
+  /// Add appropriate ZExt nodes
+  void AddZExt(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+               DebugLoc DL, unsigned Reg, MVT From, MVT To) const;
+
+  /// Move across register classes without extension
+  bool ExpandMOVX_RR(MachineInstrBuilder &MIB, MVT MVTDst, MVT MVTSrc) const;
+
+  /// Move from register and extend
+  bool ExpandMOVSZX_RR(MachineInstrBuilder &MIB, bool IsSigned, MVT MVTDst,
+                       MVT MVTSrc) const;
+
+  /// Move from memory and extend
+  bool ExpandMOVSZX_RM(MachineInstrBuilder &MIB, bool IsSigned,
+                       const MCInstrDesc &Desc, MVT MVTDst, MVT MVTSrc) const;
+
+  /// Push/Pop to/from stack
+  bool ExpandPUSH_POP(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
+                      bool IsPush) const;
+
+  /// Moves to/from CCR
+  bool ExpandCCR(MachineInstrBuilder &MIB, bool IsToCCR) const;
+
+  /// Expand all MOVEM pseudos into real MOVEMs
+  bool ExpandMOVEM(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
+                   bool IsRM) const;
+
+  /// Return a virtual register initialized with the global base register
+  /// value. Output instructions required to initialize the register in the
+  /// function entry block, if necessary.
+  unsigned getGlobalBaseReg(MachineFunction *MF) const;
+
+  std::pair<unsigned, unsigned>
+  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
+  ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableDirectMachineOperandTargetFlags() const override;
+};
+
+} // namespace llvm
+
+#endif

diff --git a/llvm/lib/Target/M68k/M68kMCInstLower.cpp b/llvm/lib/Target/M68k/M68kMCInstLower.cpp
new file mode 100644
index 000000000000..f14361559b13
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kMCInstLower.cpp
@@ -0,0 +1,170 @@
+//===-- M68kMCInstLower.cpp - M68k MachineInstr to MCInst ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains code to lower M68k MachineInstrs to their
+/// corresponding MCInst records.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kMCInstLower.h"
+
+#include "M68kAsmPrinter.h"
+#include "M68kInstrInfo.h"
+
+#include "MCTargetDesc/M68kBaseInfo.h"
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "m68k-mc-inst-lower"
+
+/// Cache per-function lowering state (MC context, target machine, asm info
+/// and the owning asm printer).
+M68kMCInstLower::M68kMCInstLower(MachineFunction &MF, M68kAsmPrinter &AP)
+    : Ctx(MF.getContext()), MF(MF), TM(MF.getTarget()), MAI(*TM.getMCAsmInfo()),
+      AsmPrinter(AP) {}
+
+/// Produce the MCSymbol for a global, external-symbol or basic-block
+/// operand. On ELF, globals prefer the local alias when one exists.
+MCSymbol *
+M68kMCInstLower::GetSymbolFromOperand(const MachineOperand &MO) const {
+  assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) &&
+         "Isn't a symbol reference");
+
+  const auto &TT = TM.getTargetTriple();
+  if (MO.isGlobal() && TT.isOSBinFormatELF())
+    return AsmPrinter.getSymbolPreferLocal(*MO.getGlobal());
+
+  const DataLayout &DL = MF.getDataLayout();
+
+  // NB: a dead `Suffix` variable (never assigned, always empty) and its
+  // uses were removed here; the private-global-prefix branch it guarded
+  // could never execute.
+  MCSymbol *Sym = nullptr;
+  SmallString<128> Name;
+
+  if (MO.isGlobal()) {
+    const GlobalValue *GV = MO.getGlobal();
+    AsmPrinter.getNameWithPrefix(Name, GV);
+  } else if (MO.isSymbol()) {
+    Mangler::getNameWithPrefix(Name, MO.getSymbolName(), DL);
+  } else if (MO.isMBB()) {
+    // Basic blocks already carry their own symbol.
+    Sym = MO.getMBB()->getSymbol();
+  }
+
+  if (!Sym)
+    Sym = Ctx.getOrCreateSymbol(Name);
+
+  return Sym;
+}
+
+/// Wrap \p Sym in an MCExpr whose variant kind matches the operand's
+/// target flags, folding in any constant offset carried by the operand.
+MCOperand M68kMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
+                                              MCSymbol *Sym) const {
+  // FIXME We would like an efficient form for this, so we don't have to do a
+  // lot of extra uniquing. This fixme is originally from X86
+  MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
+
+  switch (MO.getTargetFlags()) {
+  default:
+    llvm_unreachable("Unknown target flag on GV operand");
+  case M68kII::MO_NO_FLAG:
+  case M68kII::MO_ABSOLUTE_ADDRESS:
+  case M68kII::MO_PC_RELATIVE_ADDRESS:
+    break;
+  case M68kII::MO_GOTPCREL:
+    RefKind = MCSymbolRefExpr::VK_GOTPCREL;
+    break;
+  case M68kII::MO_GOT:
+    RefKind = MCSymbolRefExpr::VK_GOT;
+    break;
+  case M68kII::MO_GOTOFF:
+    RefKind = MCSymbolRefExpr::VK_GOTOFF;
+    break;
+  case M68kII::MO_PLT:
+    RefKind = MCSymbolRefExpr::VK_PLT;
+    break;
+  }
+
+  // NB: `Expr` was previously initialized to nullptr and guarded with
+  // `if (!Expr)`, but nothing ever assigned it before the check, so the
+  // guard was always taken; assign directly instead.
+  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, RefKind, Ctx);
+
+  // Fold the operand's constant offset into the expression. Jump-table and
+  // basic-block operands carry no meaningful offset.
+  if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) {
+    Expr = MCBinaryExpr::createAdd(
+        Expr, MCConstantExpr::create(MO.getOffset(), Ctx), Ctx);
+  }
+
+  return MCOperand::createExpr(Expr);
+}
+
+/// Lower a single MachineOperand to an MCOperand. Returns None for
+/// operands that have no MC counterpart (implicit registers and register
+/// masks).
+Optional<MCOperand>
+M68kMCInstLower::LowerOperand(const MachineInstr *MI,
+                              const MachineOperand &MO) const {
+  switch (MO.getType()) {
+  default:
+    llvm_unreachable("unknown operand type");
+  case MachineOperand::MO_Register:
+    // Ignore all implicit register operands.
+    if (MO.isImplicit())
+      return None;
+    return MCOperand::createReg(MO.getReg());
+  case MachineOperand::MO_Immediate:
+    return MCOperand::createImm(MO.getImm());
+  case MachineOperand::MO_MachineBasicBlock:
+  case MachineOperand::MO_GlobalAddress:
+  case MachineOperand::MO_ExternalSymbol:
+    return LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
+  case MachineOperand::MO_MCSymbol:
+    return LowerSymbolOperand(MO, MO.getMCSymbol());
+  case MachineOperand::MO_JumpTableIndex:
+    return LowerSymbolOperand(MO, AsmPrinter.GetJTISymbol(MO.getIndex()));
+  case MachineOperand::MO_ConstantPoolIndex:
+    return LowerSymbolOperand(MO, AsmPrinter.GetCPISymbol(MO.getIndex()));
+  case MachineOperand::MO_BlockAddress:
+    return LowerSymbolOperand(
+        MO, AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()));
+  case MachineOperand::MO_RegisterMask:
+    // Ignore call clobbers.
+    return None;
+  }
+}
+
+/// Lower \p MI into \p OutMI: lower each operand in turn and rewrite the
+/// tail-call pseudos to the real jump opcodes.
+void M68kMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
+  unsigned Opcode = MI->getOpcode();
+  OutMI.setOpcode(Opcode);
+
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    Optional<MCOperand> MCOp = LowerOperand(MI, MO);
+
+    // Operands with no MC form (implicit regs, regmasks) are dropped.
+    if (MCOp.hasValue() && MCOp.getValue().isValid())
+      OutMI.addOperand(MCOp.getValue());
+  }
+
+  // TAILJMPj, TAILJMPq - Lower to the correct jump instructions.
+  if (Opcode == M68k::TAILJMPj || Opcode == M68k::TAILJMPq) {
+    assert(OutMI.getNumOperands() == 1 && "Unexpected number of operands");
+    switch (Opcode) {
+    case M68k::TAILJMPj:
+      Opcode = M68k::JMP32j;
+      break;
+    case M68k::TAILJMPq:
+      Opcode = M68k::BRA8;
+      break;
+    }
+    OutMI.setOpcode(Opcode);
+  }
+}

diff --git a/llvm/lib/Target/M68k/M68kMCInstLower.h b/llvm/lib/Target/M68k/M68kMCInstLower.h
new file mode 100644
index 000000000000..d6160629545e
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kMCInstLower.h
@@ -0,0 +1,54 @@
+//===-- M68kMCInstLower.h - Lower MachineInstr to MCInst -----*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains code to lower M68k MachineInstrs to their
+/// corresponding MCInst records.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KMCINSTLOWER_H
+#define LLVM_LIB_TARGET_M68K_M68KMCINSTLOWER_H
+
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class MCContext;
+class MCInst;
+class MCOperand;
+class MachineInstr;
+class MachineFunction;
+class M68kAsmPrinter;
+
+/// This class is used to lower an MachineInstr into an MCInst.
+class M68kMCInstLower {
+  typedef MachineOperand::MachineOperandType MachineOperandType;
+  MCContext &Ctx;
+  MachineFunction &MF;
+  const TargetMachine &TM;
+  const MCAsmInfo &MAI;
+  M68kAsmPrinter &AsmPrinter;
+
+public:
+  M68kMCInstLower(MachineFunction &MF, M68kAsmPrinter &AP);
+
+  /// Lower an MO_GlobalAddress or MO_ExternalSymbol operand to an MCSymbol.
+  MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const;
+
+  MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const;
+
+  Optional<MCOperand> LowerOperand(const MachineInstr *MI,
+                                   const MachineOperand &MO) const;
+
+  void Lower(const MachineInstr *MI, MCInst &OutMI) const;
+};
+} // namespace llvm
+
+#endif

diff --git a/llvm/lib/Target/M68k/M68kMachineFunction.cpp b/llvm/lib/Target/M68k/M68kMachineFunction.cpp
new file mode 100644
index 000000000000..3d048df7ba49
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kMachineFunction.cpp
@@ -0,0 +1,20 @@
+//===-- M68kMachineFunctionInfo.cpp - M68k private data ----*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "M68kMachineFunction.h"
+
+#include "M68kInstrInfo.h"
+#include "M68kSubtarget.h"
+
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+
+using namespace llvm;
+
+void M68kMachineFunctionInfo::anchor() {}

diff --git a/llvm/lib/Target/M68k/M68kMachineFunction.h b/llvm/lib/Target/M68k/M68kMachineFunction.h
new file mode 100644
index 000000000000..c6952f9dcd38
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kMachineFunction.h
@@ -0,0 +1,115 @@
+//===-- M68kMachineFunctionInfo.h - M68k private data ---------*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the M68k specific subclass of MachineFunctionInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KMACHINEFUNCTION_H
+#define LLVM_LIB_TARGET_M68K_M68KMACHINEFUNCTION_H
+
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Support/MachineValueType.h"
+
+namespace llvm {
+
+/// M68k-specific per-function state: frame/callee-save bookkeeping, varargs
+/// and sret tracking, and registers forwarded to musttail calls.
+class M68kMachineFunctionInfo : public MachineFunctionInfo {
+  MachineFunction &MF;
+
+  /// Non-zero if the function has base pointer and makes call to
+  /// llvm.eh.sjlj.setjmp. When non-zero, the value is a displacement from the
+  /// frame pointer to a slot where the base pointer is stashed.
+  signed char RestoreBasePointerOffset = 0;
+
+  /// Size of the callee-saved register portion of the stack frame in bytes.
+  unsigned CalleeSavedFrameSize = 0;
+
+  /// Number of bytes function pops on return (in addition to the space used by
+  /// the return address).  Used on windows platform for stdcall & fastcall
+  /// name decoration
+  unsigned BytesToPopOnReturn = 0;
+
+  /// FrameIndex for return slot.
+  int ReturnAddrIndex = 0;
+
+  /// The number of bytes by which return address stack slot is moved as the
+  /// result of tail call optimization.
+  int TailCallReturnAddrDelta = 0;
+
+  /// keeps track of the virtual register initialized for use as the global
+  /// base register. This is used for PIC in some PIC relocation models.
+  unsigned GlobalBaseReg = 0;
+
+  /// FrameIndex for start of varargs area.
+  int VarArgsFrameIndex = 0;
+
+  /// Keeps track of whether this function uses sequences of pushes to pass
+  /// function parameters.
+  bool HasPushSequences = false;
+
+  /// Some subtargets require that sret lowering includes
+  /// returning the value of the returned struct in a register. This field
+  /// holds the virtual register into which the sret argument is passed.
+  unsigned SRetReturnReg = 0;
+
+  /// A list of virtual and physical registers that must be forwarded to every
+  /// musttail call.
+  SmallVector<ForwardedRegister, 1> ForwardedMustTailRegParms;
+
+  /// The number of bytes on stack consumed by the arguments being passed on
+  /// the stack.
+  unsigned ArgumentStackSize = 0;
+
+public:
+  // NOTE(review): MF is a reference member, so this defaulted default
+  // constructor is implicitly deleted; confirm nothing attempts to use it.
+  M68kMachineFunctionInfo() = default;
+  explicit M68kMachineFunctionInfo(MachineFunction &MF) : MF(MF) {}
+
+  bool getRestoreBasePointer() const { return RestoreBasePointerOffset != 0; }
+  void setRestoreBasePointer(const MachineFunction *MF);
+  int getRestoreBasePointerOffset() const { return RestoreBasePointerOffset; }
+
+  unsigned getCalleeSavedFrameSize() const { return CalleeSavedFrameSize; }
+  void setCalleeSavedFrameSize(unsigned bytes) { CalleeSavedFrameSize = bytes; }
+
+  unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
+  void setBytesToPopOnReturn(unsigned bytes) { BytesToPopOnReturn = bytes; }
+
+  int getRAIndex() const { return ReturnAddrIndex; }
+  void setRAIndex(int Index) { ReturnAddrIndex = Index; }
+
+  int getTCReturnAddrDelta() const { return TailCallReturnAddrDelta; }
+  void setTCReturnAddrDelta(int delta) { TailCallReturnAddrDelta = delta; }
+
+  unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
+  void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
+
+  int getVarArgsFrameIndex() const { return VarArgsFrameIndex; }
+  void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; }
+
+  bool getHasPushSequences() const { return HasPushSequences; }
+  void setHasPushSequences(bool HasPush) { HasPushSequences = HasPush; }
+
+  unsigned getSRetReturnReg() const { return SRetReturnReg; }
+  void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
+
+  unsigned getArgumentStackSize() const { return ArgumentStackSize; }
+  void setArgumentStackSize(unsigned size) { ArgumentStackSize = size; }
+
+  SmallVectorImpl<ForwardedRegister> &getForwardedMustTailRegParms() {
+    return ForwardedMustTailRegParms;
+  }
+
+private:
+  // Out-of-line virtual method to pin the vtable (defined in the .cpp).
+  virtual void anchor();
+};
+
+} // end of namespace llvm
+
+#endif // LLVM_LIB_TARGET_M68K_M68KMACHINEFUNCTION_H

diff  --git a/llvm/lib/Target/M68k/M68kRegisterInfo.cpp b/llvm/lib/Target/M68k/M68kRegisterInfo.cpp
new file mode 100644
index 000000000000..8ed28780cf3a
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kRegisterInfo.cpp
@@ -0,0 +1,261 @@
+//===-- M68kRegisterInfo.cpp - M68k Register Information -----*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k implementation of the TargetRegisterInfo class.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "m68k-reg-info"
+
+#include "M68kRegisterInfo.h"
+
+#include "M68k.h"
+#include "M68kMachineFunction.h"
+#include "M68kSubtarget.h"
+
+#include "MCTargetDesc/M68kMCTargetDesc.h"
+
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define GET_REGINFO_TARGET_DESC
+#include "M68kGenRegisterInfo.inc"
+
+using namespace llvm;
+
+static cl::opt<bool> EnableBasePointer(
+    "m68k-use-base-pointer", cl::Hidden, cl::init(true),
+    cl::desc("Enable use of a base pointer for complex stack frames"));
+
+// Out-of-line virtual method: pin the vtable to this file.
+void M68kRegisterInfo::anchor() {}
+
+M68kRegisterInfo::M68kRegisterInfo(const M68kSubtarget &ST)
+    // FIXME not sure if this is the correct value; it expects RA, but M68k
+    // passes IP anyway, how does this work?
+    : M68kGenRegisterInfo(M68k::A0, 0, 0, M68k::PC), Subtarget(ST) {
+  // Cache the special-purpose registers used by frame lowering and
+  // frame-index elimination.
+  StackPtr = M68k::SP;
+  FramePtr = M68k::A6;
+  GlobalBasePtr = M68k::A5;
+  BasePtr = M68k::A4;
+}
+
+//===----------------------------------------------------------------------===//
+// Callee Saved Registers methods
+//===----------------------------------------------------------------------===//
+
+// All calling conventions currently share the single standard callee-saved
+// list generated from the .td files.
+const MCPhysReg *
+M68kRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+  return CSR_STD_SaveList;
+}
+
+// The preserved-register mask is likewise independent of the calling
+// convention: always the standard mask from the .td files.
+const uint32_t *
+M68kRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
+                                       CallingConv::ID) const {
+  return CSR_STD_RegMask;
+}
+
+// Tail-call targets are constrained to the XR32_TC register class
+// (defined in the .td files).
+const TargetRegisterClass *
+M68kRegisterInfo::getRegsForTailCall(const MachineFunction &MF) const {
+  return &M68k::XR32_TCRegClass;
+}
+
+// Walk Reg's super-registers and return the first one contained in RC;
+// returns 0 when RC holds no super-register of Reg.
+unsigned
+M68kRegisterInfo::getMatchingMegaReg(unsigned Reg,
+                                     const TargetRegisterClass *RC) const {
+  for (MCSuperRegIterator Super(Reg, this); Super.isValid(); ++Super)
+    if (RC->contains(*Super))
+      return *Super;
+  return 0;
+}
+
+const TargetRegisterClass *
+M68kRegisterInfo::getMaximalPhysRegClass(unsigned reg, MVT VT) const {
+  assert(Register::isPhysicalRegister(reg) &&
+         "reg must be a physical register");
+
+  // Pick the most sub register class of the right type that contains
+  // this physreg.
+  // NOTE(review): despite the header's "biggest register class" wording, the
+  // update condition below prefers an RC that is a *subclass* of the current
+  // best and has more registers — confirm this is the intended ordering.
+  const TargetRegisterClass *BestRC = nullptr;
+  for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E;
+       ++I) {
+    const TargetRegisterClass *RC = *I;
+    if ((VT == MVT::Other || isTypeLegalForClass(*RC, VT)) &&
+        RC->contains(reg) &&
+        (!BestRC ||
+         (BestRC->hasSubClass(RC) && RC->getNumRegs() > BestRC->getNumRegs())))
+      BestRC = RC;
+  }
+
+  assert(BestRC && "Couldn't find the register class");
+  return BestRC;
+}
+
+int M68kRegisterInfo::getRegisterOrder(unsigned Reg,
+                                       const TargetRegisterClass &TRC) const {
+  // Scan the class in declaration order and report the position of the first
+  // register aliasing Reg; -1 when Reg overlaps nothing in the class.
+  const unsigned NumRegs = TRC.getNumRegs();
+  for (unsigned Idx = 0; Idx != NumRegs; ++Idx)
+    if (regsOverlap(Reg, TRC.getRegister(Idx)))
+      return Idx;
+  return -1;
+}
+
+// Return the register's position within the SPILL class.
+// NOTE: the check is assert-only; in builds without assertions a register
+// with no spill order silently returns -1.
+int M68kRegisterInfo::getSpillRegisterOrder(unsigned Reg) const {
+  int Result = getRegisterOrder(Reg, *getRegClass(M68k::SPILLRegClassID));
+  assert(Result >= 0 && "Can not determine spill order");
+  return Result;
+}
+
+// Compute the registers the allocator must never use: PC and SP always, the
+// frame pointer when the function has a frame, and the base pointer when the
+// frame needs one. Reservation covers each register's sub-registers and
+// aliases as well.
+BitVector M68kRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+  const M68kFrameLowering *TFI = getFrameLowering(MF);
+
+  BitVector Reserved(getNumRegs());
+
+  // Set a register's and its sub-registers and aliases as reserved.
+  auto setBitVector = [&Reserved, this](unsigned Reg) {
+    for (MCRegAliasIterator I(Reg, this, /* self */ true); I.isValid(); ++I) {
+      Reserved.set(*I);
+    }
+    for (MCSubRegIterator I(Reg, this, /* self */ true); I.isValid(); ++I) {
+      Reserved.set(*I);
+    }
+  };
+
+  setBitVector(M68k::PC);
+  setBitVector(M68k::SP);
+
+  if (TFI->hasFP(MF)) {
+    setBitVector(FramePtr);
+  }
+
+  // Set the base-pointer register and its aliases as reserved if needed.
+  if (hasBasePointer(MF)) {
+    CallingConv::ID CC = MF.getFunction().getCallingConv();
+    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
+    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
+      report_fatal_error("Stack realignment in presence of dynamic allocas is "
+                         "not supported with this calling convention.");
+
+    setBitVector(getBaseRegister());
+  }
+
+  return Reserved;
+}
+
+// Rewrite a frame-index operand into a concrete (displacement, register)
+// addressing pair once frame layout is known.
+void M68kRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+                                           int SPAdj, unsigned FIOperandNum,
+                                           RegScavenger *RS) const {
+  MachineInstr &MI = *II;
+  MachineFunction &MF = *MI.getParent()->getParent();
+  const M68kFrameLowering *TFI = getFrameLowering(MF);
+
+  // We have either (i,An,Rn) or (i,An) EA form
+  // NOTE Base contains the FI and we need to backtrace a bit to get Disp
+  MachineOperand &Disp = MI.getOperand(FIOperandNum - 1);
+  MachineOperand &Base = MI.getOperand(FIOperandNum);
+
+  int Imm = (int)(Disp.getImm());
+  int FIndex = (int)(Base.getIndex());
+
+  // FIXME tail call: implement jmp from mem
+  bool AfterFPPop = false;
+
+  // Choose the physical register that replaces the frame index: fixed
+  // objects (negative index) are addressed off the frame pointer; other
+  // objects use the base/stack/frame pointer depending on the frame shape.
+  unsigned BasePtr;
+  if (hasBasePointer(MF))
+    BasePtr = (FIndex < 0 ? FramePtr : getBaseRegister());
+  else if (needsStackRealignment(MF))
+    BasePtr = (FIndex < 0 ? FramePtr : StackPtr);
+  else if (AfterFPPop)
+    BasePtr = StackPtr;
+  else
+    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);
+
+  Base.ChangeToRegister(BasePtr, false);
+
+  // Now add the frame object offset to the offset from FP.
+  int64_t FIOffset;
+  Register IgnoredFrameReg;
+  if (AfterFPPop) {
+    // Tail call jmp happens after FP is popped.
+    const MachineFrameInfo &MFI = MF.getFrameInfo();
+    FIOffset = MFI.getObjectOffset(FIndex) - TFI->getOffsetOfLocalArea();
+  } else {
+    FIOffset =
+        TFI->getFrameIndexReference(MF, FIndex, IgnoredFrameReg).getFixed();
+  }
+
+  // SP-relative accesses must also account for any in-flight call-frame
+  // adjustment (SPAdj).
+  if (BasePtr == StackPtr)
+    FIOffset += SPAdj;
+
+  Disp.ChangeToImmediate(FIOffset + Imm);
+}
+
+// Always make the register scavenger available for this target.
+bool M68kRegisterInfo::requiresRegisterScavenging(
+    const MachineFunction &MF) const {
+  return true;
+}
+
+// Keep liveness information up to date after register allocation.
+bool M68kRegisterInfo::trackLivenessAfterRegAlloc(
+    const MachineFunction &MF) const {
+  return true;
+}
+
+// The stack pointer cannot serve as an addressing base when the frame holds
+// variable-sized objects or opaque SP adjustments.
+static bool CantUseSP(const MachineFrameInfo &MFI) {
+  if (MFI.hasVarSizedObjects())
+    return true;
+  return MFI.hasOpaqueSPAdjustment();
+}
+
+/// A dedicated base pointer is needed exactly when neither FP nor SP can
+/// address stack variables: realignment makes FP unusable and dynamic stack
+/// objects make SP unusable.
+bool M68kRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+
+  // The base pointer can be disabled via command line flag.
+  if (!EnableBasePointer)
+    return false;
+
+  // When we need stack realignment, we can't address the stack from the frame
+  // pointer.  When we have dynamic allocas or stack-adjusting inline asm, we
+  // can't address variables from the stack pointer.  MS inline asm can
+  // reference locals while also adjusting the stack pointer.  When we can't
+  // use both the SP and the FP, we need a separate base pointer register.
+  bool CantUseFP = needsStackRealignment(MF);
+  return CantUseFP && CantUseSP(MFI);
+}
+
+/// Realignment additionally requires that FP (and the base pointer, when the
+/// frame needs one) can still be reserved at this point of allocation.
+bool M68kRegisterInfo::canRealignStack(const MachineFunction &MF) const {
+  if (!TargetRegisterInfo::canRealignStack(MF))
+    return false;
+
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  const MachineRegisterInfo *MRI = &MF.getRegInfo();
+
+  // Stack realignment requires a frame pointer.  If we already started
+  // register allocation with frame pointer elimination, it is too late now.
+  if (!MRI->canReserveReg(FramePtr))
+    return false;
+
+  // If a base pointer is necessary. Check that it isn't too late to reserve it.
+  if (CantUseSP(MFI))
+    return MRI->canReserveReg(BasePtr);
+
+  return true;
+}
+
+Register M68kRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+  // Address frames via FP when the function maintains one; otherwise via SP.
+  const TargetFrameLowering *FrameLowering =
+      MF.getSubtarget().getFrameLowering();
+  if (FrameLowering->hasFP(MF))
+    return FramePtr;
+  return StackPtr;
+}
+
+// Integer values always map to the 32-bit data register class; the `size`
+// argument is currently ignored.
+const TargetRegisterClass *M68kRegisterInfo::intRegClass(unsigned size) const {
+  return &M68k::DR32RegClass;
+}

diff  --git a/llvm/lib/Target/M68k/M68kRegisterInfo.h b/llvm/lib/Target/M68k/M68kRegisterInfo.h
new file mode 100644
index 000000000000..51b94294772c
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kRegisterInfo.h
@@ -0,0 +1,109 @@
+//===-- M68kRegisterInfo.h - M68k Register Information Impl --*- C++ --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the M68k implementation of the TargetRegisterInfo
+/// class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KREGISTERINFO_H
+#define LLVM_LIB_TARGET_M68K_M68KREGISTERINFO_H
+
+#include "M68k.h"
+
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "M68kGenRegisterInfo.inc"
+
+namespace llvm {
+class M68kSubtarget;
+class TargetInstrInfo;
+class Type;
+
+/// M68k register information: special-purpose register assignments,
+/// callee-saved sets, reserved registers and frame-index elimination.
+class M68kRegisterInfo : public M68kGenRegisterInfo {
+  virtual void anchor();
+
+  /// Physical register used as stack ptr.
+  unsigned StackPtr;
+
+  /// Physical register used as frame ptr.
+  unsigned FramePtr;
+
+  /// Physical register used as a base ptr in complex stack frames.  I.e., when
+  /// we need a 3rd base, not just SP and FP, due to variable size stack
+  /// objects.
+  unsigned BasePtr;
+
+  /// Physical register used to store GOT address if needed.
+  unsigned GlobalBasePtr;
+
+protected:
+  const M68kSubtarget &Subtarget;
+
+public:
+  M68kRegisterInfo(const M68kSubtarget &Subtarget);
+
+  const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
+
+  const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+                                       CallingConv::ID) const override;
+
+  /// Returns a register class with registers that can be used in forming tail
+  /// calls.
+  const TargetRegisterClass *
+  getRegsForTailCall(const MachineFunction &MF) const;
+
+  /// Return a mega-register of the specified register Reg so its sub-register
+  /// of index SubIdx is Reg, its super(or mega) Reg. In other words it will
+  /// return a register that is not direct super register but still shares
+  /// physical register with Reg.
+  /// NOTE not sure about the term though.
+  unsigned getMatchingMegaReg(unsigned Reg,
+                              const TargetRegisterClass *RC) const;
+
+  /// Returns the Register Class of a physical register of the given type,
+  /// picking the biggest register class of the right type that contains this
+  /// physreg.
+  const TargetRegisterClass *getMaximalPhysRegClass(unsigned reg, MVT VT) const;
+
+  /// Return index of a register within a register class, otherwise return -1
+  int getRegisterOrder(unsigned Reg, const TargetRegisterClass &TRC) const;
+
+  /// Return spill order index of a register; asserts if there is none.
+  int getSpillRegisterOrder(unsigned Reg) const;
+
+  BitVector getReservedRegs(const MachineFunction &MF) const override;
+
+  bool requiresRegisterScavenging(const MachineFunction &MF) const override;
+
+  bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
+
+  /// FrameIndex represents objects inside an abstract stack. We must replace
+  /// FrameIndex with a stack/frame pointer direct reference.
+  void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
+                           unsigned FIOperandNum,
+                           RegScavenger *RS = nullptr) const override;
+
+  /// True when the function needs a dedicated base pointer register.
+  bool hasBasePointer(const MachineFunction &MF) const;
+
+  /// True if the stack can be realigned for the target.
+  bool canRealignStack(const MachineFunction &MF) const override;
+
+  Register getFrameRegister(const MachineFunction &MF) const override;
+  unsigned getStackRegister() const { return StackPtr; }
+  unsigned getBaseRegister() const { return BasePtr; }
+  unsigned getGlobalBaseRegister() const { return GlobalBasePtr; }
+
+  /// Register class used for integer values of the given size.
+  const TargetRegisterClass *intRegClass(unsigned Size) const;
+};
+
+} // end namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kSubtarget.cpp b/llvm/lib/Target/M68k/M68kSubtarget.cpp
new file mode 100644
index 000000000000..6886e1cf95a4
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kSubtarget.cpp
@@ -0,0 +1,232 @@
+//===-- M68kSubtarget.cpp - M68k Subtarget Information ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the M68k specific subclass of TargetSubtargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kSubtarget.h"
+
+#include "M68k.h"
+#include "M68kMachineFunction.h"
+#include "M68kRegisterInfo.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "m68k-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "M68kGenSubtargetInfo.inc"
+
+extern bool FixGlobalBaseReg;
+
+/// Select the M68k CPU for the given triple and cpu name; an empty or
+/// "generic" name maps to the baseline M68000.
+static StringRef selectM68kCPU(Triple TT, StringRef CPU) {
+  const bool WantDefault = CPU.empty() || CPU == "generic";
+  return WantDefault ? StringRef("M68000") : CPU;
+}
+
+// Out-of-line virtual method: pin the vtable to this file.
+void M68kSubtarget::anchor() {}
+
+M68kSubtarget::M68kSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+                             const M68kTargetMachine &TM)
+    // NOTE: initializeSubtargetDependencies runs inside InstrInfo's
+    // initializer so feature bits and stackAlignment are set before
+    // FrameLowering (which reads getStackAlignment()) is constructed.
+    : M68kGenSubtargetInfo(TT, CPU, /*TuneCPU*/ CPU, FS), TM(TM), TSInfo(),
+      InstrInfo(initializeSubtargetDependencies(CPU, TT, FS, TM)),
+      FrameLowering(*this, this->getStackAlignment()), TLInfo(TM, *this),
+      TargetTriple(TT) {}
+
+// Forward to the target machine's relocation-model setting.
+bool M68kSubtarget::isPositionIndependent() const {
+  return TM.isPositionIndependent();
+}
+
+// The subtarget always permits calls to an immediate (absolute) address.
+bool M68kSubtarget::isLegalToCallImmediateAddr() const { return true; }
+
+// Floating point is not lowered yet (see the "no floats" FIXME in the data
+// layout), so the ABI is always soft-float for now.
+bool M68kSubtarget::abiUsesSoftFloat() const { return true; }
+
+// Resolve the CPU name, parse features, and set per-CPU state that the other
+// subtarget members depend on. Called from the constructor's init list.
+M68kSubtarget &M68kSubtarget::initializeSubtargetDependencies(
+    StringRef CPU, Triple TT, StringRef FS, const M68kTargetMachine &TM) {
+  // Empty/"generic" CPU falls back to the baseline M68000.
+  std::string CPUName = selectM68kCPU(TT, CPU).str();
+
+  // Parse features string.
+  ParseSubtargetFeatures(CPUName, CPUName, FS);
+
+  // Initialize scheduling itinerary for the specified CPU.
+  InstrItins = getInstrItineraryForCPU(CPUName);
+
+  // TODO this must be controlled by options (see getStackAlignment()).
+  stackAlignment = 8;
+
+  return *this;
+}
+
+//===----------------------------------------------------------------------===//
+// Code Model
+//
+// Key assumptions:
+//  - Whenever possible we use pc-rel encoding since it is smaller(16 bit) than
+//    absolute(32 bit).
+//  - GOT is reachable within 16 bit offset for both Small and Medium models.
+//  - Code section is reachable within 16 bit offset for both models.
+//
+//  ---------------------+-------------------------+--------------------------
+//                       |          Small          |          Medium
+//                       +-------------------------+------------+-------------
+//                       |   Static   |    PIC     |   Static   |    PIC
+//  ---------------------+------------+------------+------------+-------------
+//                branch |   pc-rel   |   pc-rel   |   pc-rel   |   pc-rel
+//  ---------------------+------------+------------+------------+-------------
+//           call global |    @PLT    |    @PLT    |    @PLT    |    @PLT
+//  ---------------------+------------+------------+------------+-------------
+//         call internal |   pc-rel   |   pc-rel   |   pc-rel   |   pc-rel
+//  ---------------------+------------+------------+------------+-------------
+//            data local |   pc-rel   |   pc-rel   |  ~pc-rel   |  ^pc-rel
+//  ---------------------+------------+------------+------------+-------------
+//       data local big* |   pc-rel   |   pc-rel   |  absolute  |  @GOTOFF
+//  ---------------------+------------+------------+------------+-------------
+//           data global |   pc-rel   |  @GOTPCREL |  ~pc-rel   |  @GOTPCREL
+//  ---------------------+------------+------------+------------+-------------
+//      data global big* |   pc-rel   |  @GOTPCREL |  absolute  |  @GOTPCREL
+//  ---------------------+------------+------------+------------+-------------
+//
+// * Big data potentially cannot be reached within 16 bit offset and requires
+//   special handling for old(x00 and x10) CPUs. Normally these symbols go into
+//   separate .ldata section which mapped after normal .data and .text, but I
+//   don't really know how this must be done for M68k atm... will try to dig
+//   this info out from GCC. For now CPUs prior to M68020 will use static ref
+//   for Static Model and @GOT based references for PIC.
+//
+// ~ These are absolute for older CPUs for now.
+// ^ These are @GOTOFF for older CPUs for now.
+//===----------------------------------------------------------------------===//
+
+/// Classify a blockaddress reference for the current subtarget according to how
+/// we should reference it in a non-pcrel context.
+unsigned char M68kSubtarget::classifyBlockAddressReference() const {
+  // Unless we start to support the Large code model, branching is always
+  // pc-relative.
+  return M68kII::MO_PC_RELATIVE_ADDRESS;
+}
+
+/// Classify a reference to a symbol known to live in this DSO, per the code
+/// model table above.
+unsigned char
+M68kSubtarget::classifyLocalReference(const GlobalValue *GV) const {
+  switch (TM.getCodeModel()) {
+  default:
+    llvm_unreachable("Unsupported code model");
+  case CodeModel::Small:
+  case CodeModel::Kernel:
+    // Everything is reachable with a pc-rel displacement.
+    return M68kII::MO_PC_RELATIVE_ADDRESS;
+  case CodeModel::Medium:
+    // On M68020 and better any data offset fits into the displacement field,
+    // regardless of PIC.
+    if (atLeastM68020())
+      return M68kII::MO_PC_RELATIVE_ADDRESS;
+    // Older CPUs only have a 16-bit displacement. We could check whether the
+    // data actually fits; for now be conservative: go through @GOTOFF when
+    // position independent, otherwise use an absolute address.
+    return isPositionIndependent() ? M68kII::MO_GOTOFF
+                                   : M68kII::MO_ABSOLUTE_ADDRESS;
+  }
+}
+
+// Classify a reference to an external symbol with no GlobalValue: dso-local
+// symbols follow the local rules, PIC goes through @GOTPCREL, otherwise @GOT.
+unsigned char M68kSubtarget::classifyExternalReference(const Module &M) const {
+  if (TM.shouldAssumeDSOLocal(M, nullptr))
+    return classifyLocalReference(nullptr);
+
+  if (isPositionIndependent())
+    return M68kII::MO_GOTPCREL;
+
+  return M68kII::MO_GOT;
+}
+
+// Convenience overload: classify GV against its owning module.
+unsigned char
+M68kSubtarget::classifyGlobalReference(const GlobalValue *GV) const {
+  return classifyGlobalReference(GV, *GV->getParent());
+}
+
+// Classify a data reference per the code model table above.
+unsigned char M68kSubtarget::classifyGlobalReference(const GlobalValue *GV,
+                                                     const Module &M) const {
+  // Symbols known to resolve within this DSO follow the local rules.
+  if (TM.shouldAssumeDSOLocal(M, GV))
+    return classifyLocalReference(GV);
+
+  switch (TM.getCodeModel()) {
+  default:
+    llvm_unreachable("Unsupported code model");
+  case CodeModel::Small:
+  case CodeModel::Kernel: {
+    if (isPositionIndependent())
+      return M68kII::MO_GOTPCREL;
+    return M68kII::MO_PC_RELATIVE_ADDRESS;
+  }
+  case CodeModel::Medium: {
+    if (isPositionIndependent())
+      return M68kII::MO_GOTPCREL;
+
+    // 68020+ can reach data pc-relatively; older CPUs fall back to absolute
+    // addressing (see the table above).
+    if (atLeastM68020())
+      return M68kII::MO_PC_RELATIVE_ADDRESS;
+
+    return M68kII::MO_ABSOLUTE_ADDRESS;
+  }
+  }
+}
+
+/// Pick the jump-table entry encoding for the active code/reloc model.
+unsigned M68kSubtarget::getJumpTableEncoding() const {
+  if (isPositionIndependent()) {
+    // The only time we want to use GOTOFF(used when with EK_Custom32) is when
+    // the potential delta between the jump target and table base can be larger
+    // than displacement field, which is True for older CPUs(16 bit disp)
+    // in Medium model(can have large data way beyond 16 bit).
+    if (TM.getCodeModel() == CodeModel::Medium && !atLeastM68020())
+      return MachineJumpTableInfo::EK_Custom32;
+
+    return MachineJumpTableInfo::EK_LabelDifference32;
+  }
+
+  // In non-pic modes, just use the address of a block.
+  return MachineJumpTableInfo::EK_BlockAddress;
+}
+
+// Convenience overload: classify the function against its owning module.
+unsigned char
+M68kSubtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
+  return classifyGlobalFunctionReference(GV, *GV->getParent());
+}
+
+/// Classify how a call to GV should be referenced.
+unsigned char
+M68kSubtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
+                                               const Module &M) const {
+  // Local functions always use pc-relative referencing.
+  if (TM.shouldAssumeDSOLocal(M, GV))
+    return M68kII::MO_NO_FLAG;
+
+  // A non-lazy function gets an indirect call that loads from the GOT
+  // directly, avoiding run-time overhead at the cost of eager binding;
+  // everything else is left for the linker via the PLT.
+  const auto *Fn = dyn_cast_or_null<Function>(GV);
+  const bool IsNonLazy = Fn && Fn->hasFnAttribute(Attribute::NonLazyBind);
+  return IsNonLazy ? M68kII::MO_GOTPCREL : M68kII::MO_PLT;
+}

diff  --git a/llvm/lib/Target/M68k/M68kSubtarget.h b/llvm/lib/Target/M68k/M68kSubtarget.h
new file mode 100644
index 000000000000..631b27e1698f
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kSubtarget.h
@@ -0,0 +1,157 @@
+//===-- M68kSubtarget.h - Define Subtarget for the M68k -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the M68k specific subclass of TargetSubtargetInfo.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_CPU0_M68KSUBTARGET_H
+#define LLVM_LIB_TARGET_CPU0_M68KSUBTARGET_H
+
+#include "M68kFrameLowering.h"
+#include "M68kISelLowering.h"
+#include "M68kInstrInfo.h"
+
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Support/Alignment.h"
+
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "M68kGenSubtargetInfo.inc"
+
+extern bool M68kReserveGP;
+extern bool M68kNoCpload;
+
+namespace llvm {
+class StringRef;
+
+class M68kTargetMachine;
+
+/// M68k subtarget: ISA level, feature bits, and accessors for the target's
+/// instruction/frame/ISel lowering objects.
+class M68kSubtarget : public M68kGenSubtargetInfo {
+  virtual void anchor();
+
+protected:
+  // These define which ISA is supported. Since each Motorola M68k ISA is
+  // built on top of the previous one, whenever an ISA is selected the
+  // previous ones are selected as well.
+  enum SubtargetEnum { M00, M10, M20, M30, M40, M60 };
+  SubtargetEnum SubtargetKind = M00;
+
+  // Scheduling itineraries for the selected CPU.
+  InstrItineraryData InstrItins;
+
+  /// Small section is used.
+  bool UseSmallSection = true;
+
+  const M68kTargetMachine &TM;
+
+  SelectionDAGTargetInfo TSInfo;
+  M68kInstrInfo InstrInfo;
+  M68kFrameLowering FrameLowering;
+  M68kTargetLowering TLInfo;
+
+  /// The minimum alignment known to hold of the stack frame on
+  /// entry to the function and which must be maintained by every function.
+  unsigned stackAlignment = 8;
+
+  Triple TargetTriple;
+
+public:
+  /// This constructor initializes the data members to match that
+  /// of the specified triple.
+  M68kSubtarget(const Triple &TT, StringRef CPU, StringRef FS,
+                const M68kTargetMachine &_TM);
+
+  /// Parses features string setting specified subtarget options.  Definition
+  /// of function is auto generated by tblgen.
+  void ParseSubtargetFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS);
+
+  // ISA-level predicates; each level implies all earlier ones.
+  bool atLeastM68000() const { return SubtargetKind >= M00; }
+  bool atLeastM68010() const { return SubtargetKind >= M10; }
+  bool atLeastM68020() const { return SubtargetKind >= M20; }
+  bool atLeastM68030() const { return SubtargetKind >= M30; }
+  bool atLeastM68040() const { return SubtargetKind >= M40; }
+  bool atLeastM68060() const { return SubtargetKind >= M60; }
+
+  bool useSmallSection() const { return UseSmallSection; }
+
+  bool abiUsesSoftFloat() const;
+
+  const Triple &getTargetTriple() const { return TargetTriple; }
+
+  bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
+
+  /// Return true if the subtarget allows calls to immediate address.
+  bool isLegalToCallImmediateAddr() const;
+
+  bool isPositionIndependent() const;
+
+  /// Classify a global variable reference for the current subtarget according
+  /// to how we should reference it in a non-pcrel context.
+  unsigned char classifyLocalReference(const GlobalValue *GV) const;
+
+  /// Classify a global variable reference for the current subtarget according
+  /// to how we should reference it in a non-pcrel context.
+  unsigned char classifyGlobalReference(const GlobalValue *GV,
+                                        const Module &M) const;
+  unsigned char classifyGlobalReference(const GlobalValue *GV) const;
+
+  /// Classify an external variable reference for the current subtarget
+  /// according to how we should reference it in a non-pcrel context.
+  unsigned char classifyExternalReference(const Module &M) const;
+
+  /// Classify a global function reference for the current subtarget.
+  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
+                                                const Module &M) const;
+  unsigned char classifyGlobalFunctionReference(const GlobalValue *GV) const;
+
+  /// Classify a blockaddress reference for the current subtarget according to
+  /// how we should reference it in a non-pcrel context.
+  unsigned char classifyBlockAddressReference() const;
+
+  unsigned getJumpTableEncoding() const;
+
+  /// TODO this must be controlled by options like -malign-int and -mshort
+  Align getStackAlignment() const { return Align(stackAlignment); }
+
+  /// getSlotSize - Stack slot size in bytes.
+  unsigned getSlotSize() const { return 4; }
+
+  M68kSubtarget &initializeSubtargetDependencies(StringRef CPU, Triple TT,
+                                                 StringRef FS,
+                                                 const M68kTargetMachine &TM);
+
+  const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
+    return &TSInfo;
+  }
+
+  const M68kInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+
+  const M68kFrameLowering *getFrameLowering() const override {
+    return &FrameLowering;
+  }
+
+  const M68kRegisterInfo *getRegisterInfo() const override {
+    return &InstrInfo.getRegisterInfo();
+  }
+
+  const M68kTargetLowering *getTargetLowering() const override {
+    return &TLInfo;
+  }
+
+  const InstrItineraryData *getInstrItineraryData() const override {
+    return &InstrItins;
+  }
+};
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kTargetMachine.cpp b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
index a40ae10a832b..7476866156ae 100644
--- a/llvm/lib/Target/M68k/M68kTargetMachine.cpp
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.cpp
@@ -11,7 +11,148 @@
 ///
 //===----------------------------------------------------------------------===//
 
-/// This is just a placeholder to make current
-/// commit buildable. Body of this function will
-/// be filled in later commits
-extern "C" void LLVMInitializeM68kTarget() {}
+#include "M68kTargetMachine.h"
+#include "M68k.h"
+
+#include "M68kSubtarget.h"
+#include "M68kTargetObjectFile.h"
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/TargetRegistry.h"
+#include <memory>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "m68k"
+
+// Register the M68k target machine with LLVM's target registry so the
+// generic driver code can construct it for the m68k triple.
+extern "C" void LLVMInitializeM68kTarget() {
+  RegisterTargetMachine<M68kTargetMachine> X(TheM68kTarget);
+}
+
+namespace {
+
+/// Build the data-layout string for M68k. CPU and Options are currently
+/// unused but kept for interface parity with other targets.
+std::string computeDataLayout(const Triple &TT, StringRef CPU,
+                              const TargetOptions &Options) {
+  // Components, left to right:
+  //   E                          - M68k is big endian
+  //   m:e                        - ELF mangling (FIXME: wire to the object
+  //                                format actually in use)
+  //   p:32:32                    - pointers are always 32 bits, even on
+  //                                16-bit CPUs
+  //   i8:8:8-i16:16:16-i32:16:32 - i8 aligns on a 2-byte boundary
+  //                                (FIXME: no floats at the moment)
+  //   n8:16:32                   - registers can hold 8, 16, or 32 bits
+  //   a:0:16-S16                 - aggregate and stack alignment
+  return "E-m:e-p:32:32-i8:8:8-i16:16:16-i32:16:32-n8:16:32-a:0:16-S16";
+}
+
+/// Resolve the relocation model: honor an explicit request, otherwise
+/// default to static relocation.
+Reloc::Model getEffectiveRelocModel(const Triple &TT,
+                                    Optional<Reloc::Model> RM) {
+  return RM.getValueOr(Reloc::Static);
+}
+
+/// Resolve the code model, defaulting to Small when none was requested.
+/// Large and Kernel are reachable via user input (e.g. -code-model=large),
+/// so they are rejected with a diagnostic rather than llvm_unreachable,
+/// which is undefined behavior in release (NDEBUG) builds.
+CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
+                                       bool JIT) {
+  if (!CM) {
+    return CodeModel::Small;
+  } else if (CM == CodeModel::Large) {
+    report_fatal_error("Large code model is not supported");
+  } else if (CM == CodeModel::Kernel) {
+    report_fatal_error("Kernel code model is not implemented yet");
+  }
+  return CM.getValue();
+}
+} // end anonymous namespace
+
+/// Construct the M68k target machine. The reloc/code models default to
+/// Static/Small via the helpers above; TLOF and the default subtarget are
+/// built before initAsmInfo() runs.
+M68kTargetMachine::M68kTargetMachine(const Target &T, const Triple &TT,
+                                     StringRef CPU, StringRef FS,
+                                     const TargetOptions &Options,
+                                     Optional<Reloc::Model> RM,
+                                     Optional<CodeModel::Model> CM,
+                                     CodeGenOpt::Level OL, bool JIT)
+    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options), TT, CPU, FS,
+                        Options, getEffectiveRelocModel(TT, RM),
+                        ::getEffectiveCodeModel(CM, JIT), OL),
+      TLOF(std::make_unique<M68kELFTargetObjectFile>()),
+      Subtarget(TT, CPU, FS, *this) {
+  initAsmInfo();
+}
+
+M68kTargetMachine::~M68kTargetMachine() {}
+
+/// Return the subtarget for function \p F, honoring its "target-cpu" and
+/// "target-features" attributes and falling back to the module-level
+/// TargetCPU/TargetFS strings. Subtargets are cached in SubtargetMap keyed
+/// by the concatenated CPU+FS string, so each combination is built once.
+const M68kSubtarget *
+M68kTargetMachine::getSubtargetImpl(const Function &F) const {
+  Attribute CPUAttr = F.getFnAttribute("target-cpu");
+  Attribute FSAttr = F.getFnAttribute("target-features");
+
+  auto CPU = CPUAttr.isValid() ? CPUAttr.getValueAsString().str() : TargetCPU;
+  auto FS = FSAttr.isValid() ? FSAttr.getValueAsString().str() : TargetFS;
+
+  auto &I = SubtargetMap[CPU + FS];
+  if (!I) {
+    // This needs to be done before we create a new subtarget since any
+    // creation will depend on the TM and the code generation flags on the
+    // function that reside in TargetOptions.
+    resetTargetOptions(F);
+    I = std::make_unique<M68kSubtarget>(TargetTriple, CPU, FS, *this);
+  }
+  return I.get();
+}
+
+//===----------------------------------------------------------------------===//
+// Pass Pipeline Configuration
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// Configures the M68k codegen pass pipeline; the overridden hooks below
+/// insert the target-specific passes defined in this directory.
+class M68kPassConfig : public TargetPassConfig {
+public:
+  M68kPassConfig(M68kTargetMachine &TM, PassManagerBase &PM)
+      : TargetPassConfig(TM, PM) {}
+
+  M68kTargetMachine &getM68kTargetMachine() const {
+    return getTM<M68kTargetMachine>();
+  }
+
+  const M68kSubtarget &getM68kSubtarget() const {
+    return *getM68kTargetMachine().getSubtargetImpl();
+  }
+
+  bool addInstSelector() override;
+  void addPreSched2() override;
+  void addPreEmitPass() override;
+};
+} // namespace
+
+// Hand the generic pipeline builder an M68k-specific pass configuration.
+TargetPassConfig *M68kTargetMachine::createPassConfig(PassManagerBase &PM) {
+  return new M68kPassConfig(*this, PM);
+}
+
+bool M68kPassConfig::addInstSelector() {
+  // Install an instruction selector.
+  addPass(createM68kISelDag(getM68kTargetMachine()));
+  // Set up the global base register after selection.
+  // NOTE(review): presumably needed for PC-relative/PIC addressing —
+  // confirm against the pass definition.
+  addPass(createM68kGlobalBaseRegPass());
+  // false = selection succeeded; no fallback selector required.
+  return false;
+}
+
+void M68kPassConfig::addPreSched2() { addPass(createM68kExpandPseudoPass()); }
+
+// Collapse adjacent moves into MOVEM right before code emission.
+void M68kPassConfig::addPreEmitPass() {
+  addPass(createM68kCollapseMOVEMPass());
+}

diff  --git a/llvm/lib/Target/M68k/M68kTargetMachine.h b/llvm/lib/Target/M68k/M68kTargetMachine.h
new file mode 100644
index 000000000000..34fae8e45504
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kTargetMachine.h
@@ -0,0 +1,56 @@
+//===-- M68kTargetMachine.h - Define TargetMachine for M68k ----- C++ -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares the M68k specific subclass of TargetMachine.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KTARGETMACHINE_H
+#define LLVM_LIB_TARGET_M68K_M68KTARGETMACHINE_H
+
+#include "M68kSubtarget.h"
+#include "MCTargetDesc/M68kMCTargetDesc.h"
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+class formatted_raw_ostream;
+class M68kRegisterInfo;
+
+/// M68k-specific subclass of LLVMTargetMachine.
+class M68kTargetMachine : public LLVMTargetMachine {
+  std::unique_ptr<TargetLoweringObjectFile> TLOF;
+  // Default subtarget, built from the module-level triple/CPU/FS.
+  M68kSubtarget Subtarget;
+
+  // Lazily-populated per-function subtargets keyed by the concatenated
+  // CPU+FS attribute strings; mutable because getSubtargetImpl is const.
+  mutable StringMap<std::unique_ptr<M68kSubtarget>> SubtargetMap;
+
+public:
+  M68kTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+                    StringRef FS, const TargetOptions &Options,
+                    Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+                    CodeGenOpt::Level OL, bool JIT);
+
+  ~M68kTargetMachine() override;
+
+  /// Return the default (module-level) subtarget.
+  const M68kSubtarget *getSubtargetImpl() const { return &Subtarget; }
+
+  /// Return the subtarget tailored to \p F's target attributes.
+  const M68kSubtarget *getSubtargetImpl(const Function &F) const override;
+
+  // Pass Pipeline Configuration
+  TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+
+  TargetLoweringObjectFile *getObjFileLowering() const override {
+    return TLOF.get();
+  }
+};
+} // namespace llvm
+
+#endif

diff  --git a/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp b/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp
new file mode 100644
index 000000000000..3e26b37e7760
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kTargetObjectFile.cpp
@@ -0,0 +1,48 @@
+//===-- M68kELFTargetObjectFile.cpp - M68k Object Files -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains definitions for M68k ELF object file lowering.
+///
+//===----------------------------------------------------------------------===//
+
+#include "M68kTargetObjectFile.h"
+
+#include "M68kSubtarget.h"
+#include "M68kTargetMachine.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+// Size threshold (bytes) for placing globals in the small data/bss sections
+// created in Initialize() below.
+// NOTE(review): only declared here, not referenced in this file — confirm
+// the small-section classification code actually consults it.
+static cl::opt<unsigned> SSThreshold(
+    "m68k-ssection-threshold", cl::Hidden,
+    cl::desc("Small data and bss section threshold size (default=8)"),
+    cl::init(8));
+
+/// Set up ELF lowering for M68k: run the base-class initialization, record
+/// the owning target machine, and create the small data/bss sections.
+void M68kELFTargetObjectFile::Initialize(MCContext &Ctx,
+                                         const TargetMachine &TM) {
+  TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+  InitializeELF(TM.Options.UseInitArray);
+
+  this->TM = &static_cast<const M68kTargetMachine &>(TM);
+
+  // FIXME do we need `.sdata` and `.sbss` explicitly?
+  const unsigned SmallSecFlags = ELF::SHF_WRITE | ELF::SHF_ALLOC;
+  SmallDataSection =
+      getContext().getELFSection(".sdata", ELF::SHT_PROGBITS, SmallSecFlags);
+  SmallBSSSection =
+      getContext().getELFSection(".sbss", ELF::SHT_NOBITS, SmallSecFlags);
+}

diff  --git a/llvm/lib/Target/M68k/M68kTargetObjectFile.h b/llvm/lib/Target/M68k/M68kTargetObjectFile.h
new file mode 100644
index 000000000000..dbc5375d5423
--- /dev/null
+++ b/llvm/lib/Target/M68k/M68kTargetObjectFile.h
@@ -0,0 +1,31 @@
+//===-- M68kELFTargetObjectFile.h - M68k Object Info ---------*- C++ -====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains declarations for M68k ELF object file lowering.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_M68K_M68KTARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_M68K_M68KTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+class M68kTargetMachine;
+/// M68k ELF object-file lowering. All three members stay null until
+/// Initialize() is called; the in-class initializers guarantee they are
+/// never read as indeterminate raw pointers.
+class M68kELFTargetObjectFile : public TargetLoweringObjectFileELF {
+  // Owning target machine; set by Initialize().
+  const M68kTargetMachine *TM = nullptr;
+  // `.sdata` — small writable data section; created in Initialize().
+  MCSection *SmallDataSection = nullptr;
+  // `.sbss` — small zero-initialized data section; created in Initialize().
+  MCSection *SmallBSSSection = nullptr;
+
+public:
+  void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+};
+} // end namespace llvm
+
+#endif


        


More information about the llvm-commits mailing list